id stringlengths 25 30 | content stringlengths 14 942k | max_stars_repo_path stringlengths 49 55 |
|---|---|---|
crossvul-cpp_data_bad_3381_0 | 404: Not Found | ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3381_0 |
crossvul-cpp_data_good_573_0 | /*
* Copyright (c) 2007 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
#include "rds.h"
/*
* XXX
* - build with sparse
* - should we detect duplicate keys on a socket? hmm.
* - an rdma is an mlock, apply rlimit?
*/
/*
* get the number of pages by looking at the page indices that the start and
* end addresses fall in.
*
* Returns 0 if the vec is invalid. It is invalid if the number of bytes
* causes the address to wrap or overflows an unsigned int. This comes
* from being stored in the 'length' member of 'struct scatterlist'.
*/
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
if ((vec->addr + vec->bytes <= vec->addr) ||
(vec->bytes > (u64)UINT_MAX))
return 0;
return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
(vec->addr >> PAGE_SHIFT);
}
/*
 * Look up the MR with the given r_key in @root.
 *
 * If no node with @key exists and @insert is non-NULL, link @insert into
 * the tree, take a reference on it, and return NULL.  Otherwise return
 * the matching MR, or NULL when nothing matched and no insert was asked.
 * Every caller in this file holds rs->rs_rdma_lock around the walk.
 */
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
                struct rds_mr *insert)
{
    struct rb_node **p = &root->rb_node;
    struct rb_node *parent = NULL;
    struct rds_mr *mr;

    while (*p) {
        parent = *p;
        mr = rb_entry(parent, struct rds_mr, r_rb_node);

        if (key < mr->r_key)
            p = &(*p)->rb_left;
        else if (key > mr->r_key)
            p = &(*p)->rb_right;
        else
            return mr; /* exact match */
    }

    if (insert) {
        rb_link_node(&insert->r_rb_node, parent, p);
        rb_insert_color(&insert->r_rb_node, root);
        /* The rbtree now holds its own reference on the MR. */
        refcount_inc(&insert->r_refcount);
    }
    return NULL;
}
/*
* Destroy the transport-specific part of a MR.
*/
/*
 * Destroy the transport-specific part of a MR.
 * Safe to call more than once: the RDS_MR_DEAD bit makes the teardown
 * run exactly once.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
    struct rds_sock *rs = mr->r_sock;
    void *trans_private = NULL;
    unsigned long flags;

    rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
            mr->r_key, refcount_read(&mr->r_refcount));

    /* First caller to set RDS_MR_DEAD wins; later callers bail out. */
    if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
        return;

    spin_lock_irqsave(&rs->rs_rdma_lock, flags);
    /* Unlink from the socket's key tree if it is still linked. */
    if (!RB_EMPTY_NODE(&mr->r_rb_node))
        rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
    /* Detach the transport-private data under the lock ... */
    trans_private = mr->r_trans_private;
    mr->r_trans_private = NULL;
    spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

    /* ... and release it outside the lock. */
    if (trans_private)
        mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}
/* Called when the last reference on an MR is dropped: tear it down and free. */
void __rds_put_mr_final(struct rds_mr *mr)
{
    rds_destroy_mr(mr);
    kfree(mr);
}
/*
* By the time this is called we can't have any more ioctls called on
* the socket so we don't need to worry about racing with others.
*/
/*
 * Drop every MR registered on a socket.
 *
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
    struct rds_mr *mr;
    struct rb_node *node;
    unsigned long flags;

    /* Release any MRs associated with this socket */
    spin_lock_irqsave(&rs->rs_rdma_lock, flags);
    while ((node = rb_first(&rs->rs_rdma_keys))) {
        mr = rb_entry(node, struct rds_mr, r_rb_node);
        if (mr->r_trans == rs->rs_transport)
            mr->r_invalidate = 0;
        rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
        RB_CLEAR_NODE(&mr->r_rb_node);
        /*
         * Drop the lock for the destroy: rds_destroy_mr() takes
         * rs_rdma_lock itself, so we must not hold it here.
         */
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
        rds_destroy_mr(mr);
        rds_mr_put(mr);
        spin_lock_irqsave(&rs->rs_rdma_lock, flags);
    }
    spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

    if (rs->rs_transport && rs->rs_transport->flush_mrs)
        rs->rs_transport->flush_mrs();
}
/*
* Helper function to pin user pages.
*/
/*
 * Pin @nr_pages user pages starting at @user_addr into @pages.
 * All-or-nothing: a partial pin is rolled back and reported as -EFAULT.
 * Returns the number of pages pinned on success, negative errno on failure.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
             struct page **pages, int write)
{
    int pinned = get_user_pages_fast(user_addr, nr_pages, write, pages);

    /* Hard failure: propagate the error unchanged. */
    if (pinned < 0)
        return pinned;

    /* Everything pinned: report the count. */
    if (pinned >= nr_pages)
        return pinned;

    /* Partial pin: release what we got and fail the whole request. */
    while (pinned > 0)
        put_page(pages[--pinned]);
    return -EFAULT;
}
/*
 * Common implementation behind RDS_GET_MR / RDS_GET_MR_FOR_DEST and the
 * RDS_CMSG_RDMA_MAP cmsg: pin the user buffer described by args->vec,
 * obtain a transport MR for it, insert the MR into the socket's key tree
 * and hand the <R_Key, offset> cookie back to the caller.
 *
 * On success the cookie is stored through @cookie_ret (if non-NULL) and/or
 * written to the user address args->cookie_addr; *mr_ret (if non-NULL)
 * receives an extra-referenced pointer to the new MR.
 */
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
              u64 *cookie_ret, struct rds_mr **mr_ret)
{
    struct rds_mr *mr = NULL, *found;
    unsigned int nr_pages;
    struct page **pages = NULL;
    struct scatterlist *sg;
    void *trans_private;
    unsigned long flags;
    rds_rdma_cookie_t cookie;
    unsigned int nents;
    long i;
    int ret;

    /* Must be bound and have a transport before we can register memory. */
    if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
        ret = -ENOTCONN; /* XXX not a great errno */
        goto out;
    }

    if (!rs->rs_transport->get_mr) {
        ret = -EOPNOTSUPP;
        goto out;
    }

    nr_pages = rds_pages_in_vec(&args->vec);
    if (nr_pages == 0) {
        ret = -EINVAL;
        goto out;
    }

    /* Restrict the size of mr irrespective of underlying transport
     * To account for unaligned mr regions, subtract one from nr_pages
     */
    if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
        ret = -EMSGSIZE;
        goto out;
    }

    rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
        args->vec.addr, args->vec.bytes, nr_pages);

    /* XXX clamp nr_pages to limit the size of this alloc? */
    pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
    if (!pages) {
        ret = -ENOMEM;
        goto out;
    }

    mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
    if (!mr) {
        ret = -ENOMEM;
        goto out;
    }

    /* One reference for this function; dropped in the 'out' path. */
    refcount_set(&mr->r_refcount, 1);
    RB_CLEAR_NODE(&mr->r_rb_node);
    mr->r_trans = rs->rs_transport;
    mr->r_sock = rs;

    if (args->flags & RDS_RDMA_USE_ONCE)
        mr->r_use_once = 1;
    if (args->flags & RDS_RDMA_INVALIDATE)
        mr->r_invalidate = 1;
    if (args->flags & RDS_RDMA_READWRITE)
        mr->r_write = 1;

    /*
     * Pin the pages that make up the user buffer and transfer the page
     * pointers to the mr's sg array. We check to see if we've mapped
     * the whole region after transferring the partial page references
     * to the sg array so that we can have one page ref cleanup path.
     *
     * For now we have no flag that tells us whether the mapping is
     * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
     * the zero page.
     */
    ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
    if (ret < 0)
        goto out;

    nents = ret;
    sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
    if (!sg) {
        ret = -ENOMEM;
        goto out;
    }
    WARN_ON(!nents);
    sg_init_table(sg, nents);

    /* Stick all pages into the scatterlist */
    for (i = 0 ; i < nents; i++)
        sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

    rdsdebug("RDS: trans_private nents is %u\n", nents);

    /* Obtain a transport specific MR. If this succeeds, the
     * s/g list is now owned by the MR.
     * Note that dma_map() implies that pending writes are
     * flushed to RAM, so no dma_sync is needed here. */
    trans_private = rs->rs_transport->get_mr(sg, nents, rs,
                         &mr->r_key);

    if (IS_ERR(trans_private)) {
        /* get_mr failed, so the sg list is still ours to unwind. */
        for (i = 0 ; i < nents; i++)
            put_page(sg_page(&sg[i]));
        kfree(sg);
        ret = PTR_ERR(trans_private);
        goto out;
    }

    mr->r_trans_private = trans_private;

    rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
           mr->r_key, (void *)(unsigned long) args->cookie_addr);

    /* The user may pass us an unaligned address, but we can only
     * map page aligned regions. So we keep the offset, and build
     * a 64bit cookie containing <R_Key, offset> and pass that
     * around. */
    cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
    if (cookie_ret)
        *cookie_ret = cookie;

    if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
        ret = -EFAULT;
        goto out;
    }

    /* Inserting the new MR into the rbtree bumps its
     * reference count. */
    spin_lock_irqsave(&rs->rs_rdma_lock, flags);
    found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
    spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

    /* A different MR under the same key would mean a key collision. */
    BUG_ON(found && found != mr);

    rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
    if (mr_ret) {
        /* Extra reference handed to the caller via *mr_ret. */
        refcount_inc(&mr->r_refcount);
        *mr_ret = mr;
    }

    ret = 0;
out:
    kfree(pages);
    /* Drop this function's reference; the tree/caller hold their own. */
    if (mr)
        rds_mr_put(mr);
    return ret;
}
/*
 * RDS_GET_MR setsockopt handler: copy the user's rds_get_mr_args and
 * register the described region via __rds_rdma_map().
 */
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
    struct rds_get_mr_args args;

    /* The option payload must be exactly one rds_get_mr_args. */
    if (optlen != sizeof(args))
        return -EINVAL;

    if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
               sizeof(args)))
        return -EFAULT;

    return __rds_rdma_map(rs, &args, NULL, NULL);
}
/*
 * RDS_GET_MR_FOR_DEST setsockopt handler.  Currently behaves exactly
 * like rds_get_mr(): the destination is ignored and the relevant fields
 * are repackaged into a rds_get_mr_args for the common mapping helper.
 * TODO: Implement get_mr as wrapper around this and deprecate it.
 */
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
    struct rds_get_mr_for_dest_args args;
    struct rds_get_mr_args new_args;

    /* The option payload must be exactly one rds_get_mr_for_dest_args. */
    if (optlen != sizeof(args))
        return -EINVAL;

    if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
               sizeof(args)))
        return -EFAULT;

    new_args.vec = args.vec;
    new_args.cookie_addr = args.cookie_addr;
    new_args.flags = args.flags;

    return __rds_rdma_map(rs, &new_args, NULL, NULL);
}
/*
* Free the MR indicated by the given R_Key
*/
/*
 * Free the MR indicated by the given R_Key.
 * A zero cookie is a special case: flush all currently unused MRs.
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
    struct rds_free_mr_args args;
    struct rds_mr *mr;
    unsigned long flags;

    if (optlen != sizeof(struct rds_free_mr_args))
        return -EINVAL;

    if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
               sizeof(struct rds_free_mr_args)))
        return -EFAULT;

    /* Special case - a null cookie means flush all unused MRs */
    if (args.cookie == 0) {
        if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
            return -EINVAL;
        rs->rs_transport->flush_mrs();
        return 0;
    }

    /* Look up the MR given its R_key and remove it from the rbtree
     * so nobody else finds it.
     * This should also prevent races with rds_rdma_unuse.
     */
    spin_lock_irqsave(&rs->rs_rdma_lock, flags);
    mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
    if (mr) {
        rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
        RB_CLEAR_NODE(&mr->r_rb_node);
        if (args.flags & RDS_RDMA_INVALIDATE)
            mr->r_invalidate = 1;
    }
    spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

    if (!mr)
        return -EINVAL;

    /*
     * call rds_destroy_mr() ourselves so that we're sure it's done by the time
     * we return. If we let rds_mr_put() do it it might not happen until
     * someone else drops their ref.
     */
    rds_destroy_mr(mr);
    rds_mr_put(mr);
    return 0;
}
/*
* This is called when we receive an extension header that
* tells us this MR was used. It allows us to implement
* use_once semantics
*/
/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics: a use-once MR (or a forced unuse) is
 * unlinked from the key tree and destroyed here.
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
    struct rds_mr *mr;
    unsigned long flags;
    int zot_me = 0;     /* set when this call unlinked the MR */

    spin_lock_irqsave(&rs->rs_rdma_lock, flags);
    mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
    if (!mr) {
        pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
             r_key);
        spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
        return;
    }

    if (mr->r_use_once || force) {
        rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
        RB_CLEAR_NODE(&mr->r_rb_node);
        zot_me = 1;
    }
    spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

    /* May have to issue a dma_sync on this memory region.
     * Note we could avoid this if the operation was a RDMA READ,
     * but at this point we can't tell. */
    if (mr->r_trans->sync_mr)
        mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

    /* If the MR was marked as invalidate, this will
     * trigger an async flush. */
    if (zot_me) {
        rds_destroy_mr(mr);
        rds_mr_put(mr);
    }
}
/*
 * Release the pages and notifier held by an rdma op and mark it inactive.
 */
void rds_rdma_free_op(struct rm_rdma_op *ro)
{
    unsigned int i;

    for (i = 0; i < ro->op_nents; i++) {
        struct page *page = sg_page(&ro->op_sg[i]);

        /* Mark page dirty if it was possibly modified, which
         * is the case for a RDMA_READ which copies from remote
         * to local memory */
        if (!ro->op_write) {
            WARN_ON(!page->mapping && irqs_disabled());
            set_page_dirty(page);
        }
        put_page(page);
    }

    kfree(ro->op_notifier);
    ro->op_notifier = NULL;
    ro->op_active = 0;
}
/*
 * Release the single page and notifier held by an atomic op and mark it
 * inactive.
 */
void rds_atomic_free_op(struct rm_atomic_op *ao)
{
    struct page *page = sg_page(ao->op_sg);

    /* Mark page dirty if it was possibly modified, which
     * is the case for a RDMA_READ which copies from remote
     * to local memory */
    set_page_dirty(page);
    put_page(page);

    kfree(ao->op_notifier);
    ao->op_notifier = NULL;
    ao->op_active = 0;
}
/*
* Count the number of pages needed to describe an incoming iovec array.
*/
/*
 * Count the number of pages needed to describe an incoming iovec array.
 * Returns -EINVAL if any vector is invalid or the total would overflow.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
    int total = 0;
    unsigned int i;

    for (i = 0; i < nr_iovecs; i++) {
        unsigned int pages = rds_pages_in_vec(&iov[i]);

        /* Zero means the vec was rejected as invalid. */
        if (pages == 0)
            return -EINVAL;

        total += pages;
        /*
         * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
         * so the running total cannot wrap without first going negative.
         */
        if (total < 0)
            return -EINVAL;
    }

    return total;
}
/*
 * Compute the scatterlist bytes needed for the user's local iovec array,
 * copying each rds_iovec in from user space as it is validated.
 * Returns the byte count, or -EFAULT / -EINVAL on bad input.
 */
int rds_rdma_extra_size(struct rds_rdma_args *args)
{
    struct rds_iovec vec;
    struct rds_iovec __user *local_vec;
    int tot_pages = 0;
    unsigned int nr_pages;
    unsigned int i;

    local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

    if (args->nr_local == 0)
        return -EINVAL;

    /* figure out the number of pages in the vector */
    for (i = 0; i < args->nr_local; i++) {
        if (copy_from_user(&vec, &local_vec[i],
                   sizeof(struct rds_iovec)))
            return -EFAULT;

        nr_pages = rds_pages_in_vec(&vec);
        if (nr_pages == 0)
            return -EINVAL;

        tot_pages += nr_pages;

        /*
         * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
         * so tot_pages cannot overflow without first going negative.
         */
        if (tot_pages < 0)
            return -EINVAL;
    }

    return tot_pages * sizeof(struct scatterlist);
}
/*
* The application asks for a RDMA transfer.
* Extract all arguments and set up the rdma_op
*/
/*
 * The application asks for a RDMA transfer.
 * Extract all arguments from the RDS_CMSG_RDMA_ARGS cmsg, pin the local
 * pages and set up rm->rdma (the rm_rdma_op) for the transfer.
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
              struct cmsghdr *cmsg)
{
    struct rds_rdma_args *args;
    struct rm_rdma_op *op = &rm->rdma;
    int nr_pages;
    unsigned int nr_bytes;
    struct page **pages = NULL;
    struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
    int iov_size;
    unsigned int i, j;
    int ret = 0;

    /* Reject short cmsgs and messages that already carry an rdma op. */
    if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
        || rm->rdma.op_active)
        return -EINVAL;

    args = CMSG_DATA(cmsg);

    if (rs->rs_bound_addr == 0) {
        ret = -ENOTCONN; /* XXX not a great errno */
        goto out_ret;
    }

    if (args->nr_local > UIO_MAXIOV) {
        ret = -EMSGSIZE;
        goto out_ret;
    }

    /* Check whether to allocate the iovec area */
    iov_size = args->nr_local * sizeof(struct rds_iovec);
    if (args->nr_local > UIO_FASTIOV) {
        iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
        if (!iovs) {
            ret = -ENOMEM;
            goto out_ret;
        }
    }

    if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
        ret = -EFAULT;
        goto out;
    }

    nr_pages = rds_rdma_pages(iovs, args->nr_local);
    if (nr_pages < 0) {
        ret = -EINVAL;
        goto out;
    }

    pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
    if (!pages) {
        ret = -ENOMEM;
        goto out;
    }

    op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
    op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
    op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
    op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
    op->op_active = 1;
    op->op_recverr = rs->rs_recverr;
    WARN_ON(!nr_pages);
    /* May return NULL on allocation failure; checked below. */
    op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
    if (!op->op_sg) {
        ret = -ENOMEM;
        goto out;
    }

    if (op->op_notify || op->op_recverr) {
        /* We allocate an uninitialized notifier here, because
         * we don't want to do that in the completion handler. We
         * would have to use GFP_ATOMIC there, and don't want to deal
         * with failed allocations.
         */
        op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
        if (!op->op_notifier) {
            ret = -ENOMEM;
            goto out;
        }
        op->op_notifier->n_user_token = args->user_token;
        op->op_notifier->n_status = RDS_RDMA_SUCCESS;

        /* Enable rmda notification on data operation for composite
         * rds messages and make sure notification is enabled only
         * for the data operation which follows it so that application
         * gets notified only after full message gets delivered.
         */
        if (rm->data.op_sg) {
            rm->rdma.op_notify = 0;
            rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
        }
    }

    /* The cookie contains the R_Key of the remote memory region, and
     * optionally an offset into it. This is how we implement RDMA into
     * unaligned memory.
     * When setting up the RDMA, we need to add that offset to the
     * destination address (which is really an offset into the MR)
     * FIXME: We may want to move this into ib_rdma.c
     */
    op->op_rkey = rds_rdma_cookie_key(args->cookie);
    op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

    nr_bytes = 0;

    rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
           (unsigned long long)args->nr_local,
           (unsigned long long)args->remote_vec.addr,
           op->op_rkey);

    for (i = 0; i < args->nr_local; i++) {
        struct rds_iovec *iov = &iovs[i];
        /* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
        unsigned int nr = rds_pages_in_vec(iov);

        rs->rs_user_addr = iov->addr;
        rs->rs_user_bytes = iov->bytes;

        /* If it's a WRITE operation, we want to pin the pages for reading.
         * If it's a READ operation, we need to pin the pages for writing.
         */
        ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
        if (ret < 0)
            goto out;
        else
            ret = 0;

        rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
             nr_bytes, nr, iov->bytes, iov->addr);

        nr_bytes += iov->bytes;

        /* Split this iovec across page-sized scatterlist entries,
         * advancing iov->addr/iov->bytes as each entry is consumed. */
        for (j = 0; j < nr; j++) {
            unsigned int offset = iov->addr & ~PAGE_MASK;
            struct scatterlist *sg;

            sg = &op->op_sg[op->op_nents + j];
            sg_set_page(sg, pages[j],
                    min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
                    offset);

            rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
                 sg->offset, sg->length, iov->addr, iov->bytes);

            iov->addr += sg->length;
            iov->bytes -= sg->length;
        }

        op->op_nents += nr;
    }

    /* The local buffer must fit within the remote region. */
    if (nr_bytes > args->remote_vec.bytes) {
        rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
                nr_bytes,
                (unsigned int) args->remote_vec.bytes);
        ret = -EINVAL;
        goto out;
    }
    op->op_bytes = nr_bytes;

out:
    if (iovs != iovstack)
        sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
    kfree(pages);
out_ret:
    /* On failure release whatever the op accumulated so far. */
    if (ret)
        rds_rdma_free_op(op);
    else
        rds_stats_inc(s_send_rdma);

    return ret;
}
/*
* The application wants us to pass an RDMA destination (aka MR)
* to the remote
*/
/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote.  The cmsg payload is an rds_rdma_cookie_t; we look up
 * the referenced MR, take a reference on it and attach it to the message.
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
              struct cmsghdr *cmsg)
{
    unsigned long flags;
    struct rds_mr *mr;
    u32 r_key;
    int err = 0;

    /* Reject short cmsgs and messages that already carry a cookie. */
    if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
        rm->m_rdma_cookie != 0)
        return -EINVAL;

    memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

    /* We are reusing a previously mapped MR here. Most likely, the
     * application has written to the buffer, so we need to explicitly
     * flush those writes to RAM. Otherwise the HCA may not see them
     * when doing a DMA from that buffer.
     */
    r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

    spin_lock_irqsave(&rs->rs_rdma_lock, flags);
    mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
    if (!mr)
        err = -EINVAL; /* invalid r_key */
    else
        refcount_inc(&mr->r_refcount);
    spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

    if (mr) {
        mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
        rm->rdma.op_rdma_mr = mr;
    }
    return err;
}
/*
* The application passes us an address range it wants to enable RDMA
* to/from. We map the area, and save the <R_Key,offset> pair
* in rm->m_rdma_cookie. This causes it to be sent along to the peer
* in an extension header.
*/
/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
              struct cmsghdr *cmsg)
{
    /* Reject short cmsgs and messages that already carry a cookie. */
    if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
        rm->m_rdma_cookie != 0)
        return -EINVAL;

    return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
}
/*
* Fill in rds_message for an atomic request.
*/
/*
 * Fill in rds_message for an atomic request: decode the cmsg into
 * rm->atomic, pin the single 8-byte-aligned local page the result is
 * written to, and record the remote address/rkey from the cookie.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
            struct cmsghdr *cmsg)
{
    struct page *page = NULL;
    struct rds_atomic_args *args;
    int ret = 0;

    /* Reject short cmsgs and messages that already carry an atomic op. */
    if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
        || rm->atomic.op_active)
        return -EINVAL;

    args = CMSG_DATA(cmsg);

    /* Nonmasked & masked cmsg ops converted to masked hw ops */
    switch (cmsg->cmsg_type) {
    case RDS_CMSG_ATOMIC_FADD:
        rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
        rm->atomic.op_m_fadd.add = args->fadd.add;
        rm->atomic.op_m_fadd.nocarry_mask = 0;
        break;
    case RDS_CMSG_MASKED_ATOMIC_FADD:
        rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
        rm->atomic.op_m_fadd.add = args->m_fadd.add;
        rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
        break;
    case RDS_CMSG_ATOMIC_CSWP:
        rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
        rm->atomic.op_m_cswp.compare = args->cswp.compare;
        rm->atomic.op_m_cswp.swap = args->cswp.swap;
        rm->atomic.op_m_cswp.compare_mask = ~0;
        rm->atomic.op_m_cswp.swap_mask = ~0;
        break;
    case RDS_CMSG_MASKED_ATOMIC_CSWP:
        rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
        rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
        rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
        rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
        rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
        break;
    default:
        BUG(); /* should never happen */
    }

    rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
    rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
    rm->atomic.op_active = 1;
    rm->atomic.op_recverr = rs->rs_recverr;
    /* May return NULL on allocation failure; checked below. */
    rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
    if (!rm->atomic.op_sg) {
        ret = -ENOMEM;
        goto err;
    }

    /* verify 8 byte-aligned */
    if (args->local_addr & 0x7) {
        ret = -EFAULT;
        goto err;
    }

    /* Pin the single page the 8-byte result lands in, for writing. */
    ret = rds_pin_pages(args->local_addr, 1, &page, 1);
    if (ret != 1)
        goto err;
    ret = 0;

    sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

    if (rm->atomic.op_notify || rm->atomic.op_recverr) {
        /* We allocate an uninitialized notifier here, because
         * we don't want to do that in the completion handler. We
         * would have to use GFP_ATOMIC there, and don't want to deal
         * with failed allocations.
         */
        rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
        if (!rm->atomic.op_notifier) {
            ret = -ENOMEM;
            goto err;
        }

        rm->atomic.op_notifier->n_user_token = args->user_token;
        rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
    }

    rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
    rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

    return ret;
err:
    /* Unwind: unpin the page (if any) and deactivate the op. */
    if (page)
        put_page(page);
    rm->atomic.op_active = 0;
    kfree(rm->atomic.op_notifier);

    return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_573_0 |
crossvul-cpp_data_bad_212_0 | /*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/iversion.h>
/*
* Allocate and initialise an xfs_inode.
*/
/*
 * Allocate and initialise an xfs_inode for inode number @ino.
 * Returns NULL if the zone allocation or VFS inode init fails.
 */
struct xfs_inode *
xfs_inode_alloc(
    struct xfs_mount    *mp,
    xfs_ino_t       ino)
{
    struct xfs_inode    *ip;

    /*
     * if this didn't occur in transactions, we could use
     * KM_MAYFAIL and return NULL here on ENOMEM. Set the
     * code up to do this anyway.
     */
    ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
    if (!ip)
        return NULL;
    if (inode_init_always(mp->m_super, VFS_I(ip))) {
        kmem_zone_free(xfs_inode_zone, ip);
        return NULL;
    }

    /* VFS doesn't initialise i_mode! */
    VFS_I(ip)->i_mode = 0;

    XFS_STATS_INC(mp, vn_active);
    ASSERT(atomic_read(&ip->i_pincount) == 0);
    ASSERT(!xfs_isiflocked(ip));
    ASSERT(ip->i_ino == 0);

    /* initialise the xfs inode */
    ip->i_ino = ino;
    ip->i_mount = mp;
    memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
    ip->i_afp = NULL;
    ip->i_cowfp = NULL;
    ip->i_cnextents = 0;
    ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
    memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
    ip->i_flags = 0;
    ip->i_delayed_blks = 0;
    memset(&ip->i_d, 0, sizeof(ip->i_d));

    return ip;
}
/*
 * RCU callback that actually frees an xfs_inode: tear down the data,
 * attr and CoW forks and the attached log item, then release the zone
 * memory.  Runs after an RCU grace period (see __xfs_inode_free()).
 */
STATIC void
xfs_inode_free_callback(
    struct rcu_head     *head)
{
    struct inode        *inode = container_of(head, struct inode, i_rcu);
    struct xfs_inode    *ip = XFS_I(inode);

    switch (VFS_I(ip)->i_mode & S_IFMT) {
    case S_IFREG:
    case S_IFDIR:
    case S_IFLNK:
        xfs_idestroy_fork(ip, XFS_DATA_FORK);
        break;
    }

    if (ip->i_afp)
        xfs_idestroy_fork(ip, XFS_ATTR_FORK);
    if (ip->i_cowfp)
        xfs_idestroy_fork(ip, XFS_COW_FORK);

    if (ip->i_itemp) {
        /* The inode log item must no longer be in the AIL. */
        ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
        xfs_inode_item_destroy(ip);
        ip->i_itemp = NULL;
    }

    kmem_zone_free(xfs_inode_zone, ip);
}
/*
 * Queue an inode for RCU-deferred freeing via xfs_inode_free_callback().
 */
static void
__xfs_inode_free(
    struct xfs_inode    *ip)
{
    /* asserts to verify all state is correct here */
    ASSERT(atomic_read(&ip->i_pincount) == 0);
    XFS_STATS_DEC(ip->i_mount, vn_active);

    call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
/*
 * Free an inode, first marking it reclaimed with a zeroed inode number
 * so concurrent RCU lookups can detect the recycled structure.
 */
void
xfs_inode_free(
    struct xfs_inode    *ip)
{
    ASSERT(!xfs_isiflocked(ip));

    /*
     * Because we use RCU freeing we need to ensure the inode always
     * appears to be reclaimed with an invalid inode number when in the
     * free state. The ip->i_flags_lock provides the barrier against lookup
     * races.
     */
    spin_lock(&ip->i_flags_lock);
    ip->i_flags = XFS_IRECLAIM;
    ip->i_ino = 0;
    spin_unlock(&ip->i_flags_lock);

    __xfs_inode_free(ip);
}
/*
* Queue a new inode reclaim pass if there are reclaimable inodes and there
* isn't a reclaim pass already in progress. By default it runs every 5s based
* on the xfs periodic sync default of 30s. Perhaps this should have it's own
* tunable, but that can be done if this method proves to be ineffective or too
* aggressive.
*/
/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have it's own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
    struct xfs_mount        *mp)
{
    rcu_read_lock();
    /* Only queue work if some per-AG tree is tagged for reclaim. */
    if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
        queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
            msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
    }
    rcu_read_unlock();
}
/*
* This is a fast pass over the inode cache to try to get reclaim moving on as
* many inodes as possible in a short period of time. It kicks itself every few
* seconds, as well as being kicked by the inode cache shrinker when memory
* goes low. It scans as quickly as possible avoiding locked inodes or those
* already being flushed, and once done schedules a future pass.
*/
/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
    struct work_struct *work)
{
    struct xfs_mount *mp = container_of(to_delayed_work(work),
                    struct xfs_mount, m_reclaim_work);

    xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
    /* Re-arm for the next periodic pass. */
    xfs_reclaim_work_queue(mp);
}
/*
 * Note one more reclaimable inode in this AG; on the 0 -> 1 transition,
 * tag the AG in the per-mount radix tree and kick background reclaim.
 * Caller must hold pag->pag_ici_lock (asserted).
 */
static void
xfs_perag_set_reclaim_tag(
    struct xfs_perag    *pag)
{
    struct xfs_mount    *mp = pag->pag_mount;

    lockdep_assert_held(&pag->pag_ici_lock);
    /* Already tagged if this isn't the first reclaimable inode. */
    if (pag->pag_ici_reclaimable++)
        return;

    /* propagate the reclaim tag up into the perag radix tree */
    spin_lock(&mp->m_perag_lock);
    radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
               XFS_ICI_RECLAIM_TAG);
    spin_unlock(&mp->m_perag_lock);

    /* schedule periodic background inode reclaim */
    xfs_reclaim_work_queue(mp);

    trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}
/*
 * Note one less reclaimable inode in this AG; on the 1 -> 0 transition,
 * clear the AG's tag in the per-mount radix tree.
 * Caller must hold pag->pag_ici_lock (asserted).
 */
static void
xfs_perag_clear_reclaim_tag(
    struct xfs_perag    *pag)
{
    struct xfs_mount    *mp = pag->pag_mount;

    lockdep_assert_held(&pag->pag_ici_lock);
    /* Other reclaimable inodes remain: keep the tag. */
    if (--pag->pag_ici_reclaimable)
        return;

    /* clear the reclaim tag from the perag radix tree */
    spin_lock(&mp->m_perag_lock);
    radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
                 XFS_ICI_RECLAIM_TAG);
    spin_unlock(&mp->m_perag_lock);
    trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}
/*
* We set the inode flag atomically with the radix tree tag.
* Once we get tag lookups on the radix tree, this inode flag
* can go away.
*/
/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
    struct xfs_inode    *ip)
{
    struct xfs_mount    *mp = ip->i_mount;
    struct xfs_perag    *pag;

    pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
    spin_lock(&pag->pag_ici_lock);
    spin_lock(&ip->i_flags_lock);

    /* Tag in the per-AG inode radix tree, then propagate upwards. */
    radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
               XFS_ICI_RECLAIM_TAG);
    xfs_perag_set_reclaim_tag(pag);
    __xfs_iflags_set(ip, XFS_IRECLAIMABLE);

    spin_unlock(&ip->i_flags_lock);
    spin_unlock(&pag->pag_ici_lock);
    xfs_perag_put(pag);
}
/*
 * Clear the reclaim tag for inode @ino in the per-AG radix tree and
 * update the AG-level reclaim accounting.
 */
STATIC void
xfs_inode_clear_reclaim_tag(
    struct xfs_perag    *pag,
    xfs_ino_t       ino)
{
    radix_tree_tag_clear(&pag->pag_ici_root,
                 XFS_INO_TO_AGINO(pag->pag_mount, ino),
                 XFS_ICI_RECLAIM_TAG);
    xfs_perag_clear_reclaim_tag(pag);
}
/*
 * Sleep until the XFS_INEW flag is cleared on @ip, i.e. until inode
 * initialisation by another thread completes.
 */
static void
xfs_inew_wait(
    struct xfs_inode    *ip)
{
    wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
    DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

    do {
        prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
        /* Re-check the flag after arming the waitqueue entry. */
        if (!xfs_iflags_test(ip, XFS_INEW))
            break;
        schedule();
    } while (true);
    finish_wait(wq, &wait.wq_entry);
}
/*
* When we recycle a reclaimable inode, we need to re-initialise the VFS inode
* part of the structure. This is made more complex by the fact we store
* information about the on-disk values in the VFS inode and so we can't just
* overwrite the values unconditionally. Hence we save the parameters we
* need to retain across reinitialisation, and rewrite them into the VFS inode
* after reinitialisation even if it fails.
*/
/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
    struct xfs_mount    *mp,
    struct inode        *inode)
{
    int     error;
    uint32_t    nlink = inode->i_nlink;
    uint32_t    generation = inode->i_generation;
    uint64_t    version = inode_peek_iversion(inode);
    umode_t     mode = inode->i_mode;
    dev_t       dev = inode->i_rdev;

    error = inode_init_always(mp->m_super, inode);

    /* Restore the preserved fields regardless of @error (see above). */
    set_nlink(inode, nlink);
    inode->i_generation = generation;
    inode_set_iversion_queried(inode, version);
    inode->i_mode = mode;
    inode->i_rdev = dev;
    return error;
}
/*
* Check the validity of the inode we just found it the cache
*/
/*
 * Check the validity of the inode we just found it the cache.
 * Returns 0 on a usable hit, -EAGAIN if the caller should retry the
 * lookup, or -ENOENT when racing with unlink.  Called under RCU with
 * ip->i_flags_lock about to be taken; releases RCU before returning 0
 * or an error.
 */
static int
xfs_iget_cache_hit(
    struct xfs_perag    *pag,
    struct xfs_inode    *ip,
    xfs_ino_t       ino,
    int         flags,
    int         lock_flags) __releases(RCU)
{
    struct inode        *inode = VFS_I(ip);
    struct xfs_mount    *mp = ip->i_mount;
    int         error;

    /*
     * check for re-use of an inode within an RCU grace period due to the
     * radix tree nodes not being updated yet. We monitor for this by
     * setting the inode number to zero before freeing the inode structure.
     * If the inode has been reallocated and set up, then the inode number
     * will not match, so check for that, too.
     */
    spin_lock(&ip->i_flags_lock);
    if (ip->i_ino != ino) {
        trace_xfs_iget_skip(ip);
        XFS_STATS_INC(mp, xs_ig_frecycle);
        error = -EAGAIN;
        goto out_error;
    }

    /*
     * If we are racing with another cache hit that is currently
     * instantiating this inode or currently recycling it out of
     * reclaimabe state, wait for the initialisation to complete
     * before continuing.
     *
     * XXX(hch): eventually we should do something equivalent to
     *  wait_on_inode to wait for these flags to be cleared
     *  instead of polling for it.
     */
    if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
        trace_xfs_iget_skip(ip);
        XFS_STATS_INC(mp, xs_ig_frecycle);
        error = -EAGAIN;
        goto out_error;
    }

    /*
     * If lookup is racing with unlink return an error immediately.
     */
    if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
        error = -ENOENT;
        goto out_error;
    }

    /*
     * If IRECLAIMABLE is set, we've torn down the VFS inode already.
     * Need to carefully get it back into useable state.
     */
    if (ip->i_flags & XFS_IRECLAIMABLE) {
        trace_xfs_iget_reclaim(ip);

        if (flags & XFS_IGET_INCORE) {
            error = -EAGAIN;
            goto out_error;
        }

        /*
         * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
         * from stomping over us while we recycle the inode.  We can't
         * clear the radix tree reclaimable tag yet as it requires
         * pag_ici_lock to be held exclusive.
         */
        ip->i_flags |= XFS_IRECLAIM;

        spin_unlock(&ip->i_flags_lock);
        rcu_read_unlock();

        error = xfs_reinit_inode(mp, inode);
        if (error) {
            bool wake;
            /*
             * Re-initializing the inode failed, and we are in deep
             * trouble.  Try to re-add it to the reclaim list.
             */
            rcu_read_lock();
            spin_lock(&ip->i_flags_lock);
            wake = !!__xfs_iflags_test(ip, XFS_INEW);
            ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
            if (wake)
                wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
            ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
            trace_xfs_iget_reclaim_fail(ip);
            goto out_error;
        }

        spin_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);

        /*
         * Clear the per-lifetime state in the inode as we are now
         * effectively a new inode and need to return to the initial
         * state before reuse occurs.
         */
        ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
        ip->i_flags |= XFS_INEW;
        xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
        inode->i_state = I_NEW;

        ASSERT(!rwsem_is_locked(&inode->i_rwsem));
        init_rwsem(&inode->i_rwsem);

        spin_unlock(&ip->i_flags_lock);
        spin_unlock(&pag->pag_ici_lock);
    } else {
        /* If the VFS inode is being torn down, pause and try again. */
        if (!igrab(inode)) {
            trace_xfs_iget_skip(ip);
            error = -EAGAIN;
            goto out_error;
        }

        /* We've got a live one. */
        spin_unlock(&ip->i_flags_lock);
        rcu_read_unlock();
        trace_xfs_iget_hit(ip);
    }

    if (lock_flags != 0)
        xfs_ilock(ip, lock_flags);

    if (!(flags & XFS_IGET_INCORE))
        xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
    XFS_STATS_INC(mp, xs_ig_found);

    return 0;

out_error:
    spin_unlock(&ip->i_flags_lock);
    rcu_read_unlock();
    return error;
}
/*
 * Inode cache miss: allocate an in-core inode, read it from disk,
 * validate it and insert it into the per-AG radix tree.
 *
 * Returns 0 with *ipp pointing to the new inode (flagged XFS_INEW and
 * locked with @lock_flags, if any), -EAGAIN if a racing insert means the
 * caller should retry the lookup, or another negative errno on failure.
 */
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	if (!xfs_inode_verify_forks(ip)) {
		error = -EFSCORRUPTED;
		goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * If we are allocating a new inode, then check what was returned is
	 * actually a free, empty inode. If we are not allocating an inode,
	 * then check we didn't find a free inode.
	 */
	if (flags & XFS_IGET_CREATE) {
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(mp,
"Corruption detected! Free inode 0x%llx not marked free on disk",
				ino);
			error = -EFSCORRUPTED;
			goto out_destroy;
		}
		if (ip->i_d.di_nblocks != 0) {
			xfs_warn(mp,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ino);
			error = -EFSCORRUPTED;
			goto out_destroy;
		}
	} else if (VFS_I(ip)->i_mode == 0) {
		error = -ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		/* only an already-present entry is expected here */
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	/* tear down the partially constructed VFS and XFS inode */
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
/*
* Look up an inode by number in the given file system.
* The inode is looked up in the cache held in each AG.
* If the inode is found in the cache, initialise the vfs inode
* if necessary.
*
* If it is not in core, read it in from the file system's device,
* add it to the cache and initialise the vfs inode.
*
* The inode is locked according to the value of the lock_flags parameter.
* This flag parameter indicates how and if the inode's IO lock and inode lock
* should be taken.
*
* mp -- the mount point structure for the current file system. It points
* to the inode hash table.
* tp -- a pointer to the current transaction if there is one. This is
* simply passed through to the xfs_iread() call.
* ino -- the number of the inode desired. This is the unique identifier
* within the file system for the inode being requested.
* lock_flags -- flags indicating how to lock the inode. See the comment
* for xfs_ilock() for a list of valid values.
*/
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		/* cache hit path drops the RCU read lock for us */
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	/* transient races report -EAGAIN; back off briefly and retry */
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
/*
* "Is this a cached inode that's also allocated?"
*
* Look up an inode by number in the given file system. If the inode is
* in cache and isn't in purgatory, return 1 if the inode is allocated
* and 0 if it is not. For all other cases (not in cache, being torn
* down, etc.), return a negative error code.
*
* The caller has to prevent inode allocation and freeing activity,
* presumably by locking the AGI buffer. This is to ensure that an
* inode cannot transition from allocated to freed until the caller is
* ready to allow that. If the inode is in an intermediate state (new,
* reclaimable, or being reclaimed), -EAGAIN will be returned; if the
* inode is not in the cache, -ENOENT will be returned. The caller must
* deal with these scenarios appropriately.
*
* This is a specialized use case for the online scrubber; if you're
* reading this, you probably want xfs_iget.
*/
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip = NULL;
	int			ret;

	/* In-core-only lookup; intermediate states come back as errors. */
	ret = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (ret)
		return ret;

	/* A zero i_mode means the cached inode is free. */
	*inuse = (VFS_I(ip)->i_mode != 0);
	IRELE(ip);
	return 0;
}
/*
* The inode lookup is done in batches to keep the amount of lock traffic and
* radix tree lookups to a minimum. The batch size is a trade off between
* lookup reduction and stack usage. This is in the reclaim path, so we can't
* be too greedy.
*/
#define XFS_LOOKUP_BATCH 32
/*
 * Grab a passive reference on an inode found during an AG walk.
 *
 * Called under rcu_read_lock().  Returns 0 with an igrab() reference
 * held, or a negative errno when the inode must be skipped (stale RCU
 * entry, new/reclaimable inode, shutdown filesystem, or VFS teardown).
 */
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	bool			newinos = !!(flags & XFS_AGITER_INEW_WAIT);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return -ENOENT;

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return -ENOENT;
}
/*
 * Walk the in-core inodes of one AG in XFS_LOOKUP_BATCH-sized batches
 * and run @execute on every inode we can grab.  @tag restricts the walk
 * to a radix tree tag (-1 walks everything).  Inodes whose callback
 * returns -EAGAIN are counted as skipped and the whole walk restarts
 * after a short delay.  Returns the last non-EAGAIN error seen;
 * -EFSCORRUPTED aborts the walk immediately.
 */
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag,
	int			iter_flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. if we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that lead
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			/* optionally wait for XFS_INEW setup to finish */
			if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
			    xfs_iflags_test(batch[i], XFS_INEW))
				xfs_inew_wait(batch[i]);
			error = execute(batch[i], flags, args);
			IRELE(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
/*
* Background scanning to trim post-EOF preallocated space. This is queued
* based on the 'speculative_prealloc_lifetime' tunable (5m by default).
*/
/*
 * Re-arm the background post-EOF preallocation trimming work, but only
 * while at least one AG still carries the EOFBLOCKS tag.
 */
void
xfs_queue_eofblocks(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG)) {
		unsigned long	timeout;

		timeout = msecs_to_jiffies(xfs_eofb_secs * 1000);
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work, timeout);
	}
	rcu_read_unlock();
}
/*
 * Delayed-work entry point for the post-EOF preallocation trimmer:
 * run one full scan, then queue the next period.
 */
void
xfs_eofblocks_worker(
	struct work_struct	*work)
{
	struct delayed_work	*dwork = to_delayed_work(work);
	struct xfs_mount	*mp;

	mp = container_of(dwork, struct xfs_mount, m_eofblocks_work);
	xfs_icache_free_eofblocks(mp, NULL);
	xfs_queue_eofblocks(mp);
}
/*
* Background scanning to trim preallocated CoW space. This is queued
* based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
* (We'll just piggyback on the post-EOF prealloc space workqueue.)
*/
/*
 * Re-arm the background CoW preallocation trimming work, but only while
 * at least one AG still carries the COWBLOCKS tag.  Shares the EOF
 * blocks workqueue.
 */
void
xfs_queue_cowblocks(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG)) {
		unsigned long	timeout;

		timeout = msecs_to_jiffies(xfs_cowb_secs * 1000);
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_cowblocks_work, timeout);
	}
	rcu_read_unlock();
}
/*
 * Delayed-work entry point for the CoW preallocation trimmer: run one
 * full scan, then queue the next period.
 */
void
xfs_cowblocks_worker(
	struct work_struct	*work)
{
	struct delayed_work	*dwork = to_delayed_work(work);
	struct xfs_mount	*mp;

	mp = container_of(dwork, struct xfs_mount, m_cowblocks_work);
	xfs_icache_free_cowblocks(mp, NULL);
	xfs_queue_cowblocks(mp);
}
int
xfs_inode_ag_iterator_flags(
struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags,
void *args),
int flags,
void *args,
int iter_flags)
{
struct xfs_perag *pag;
int error = 0;
int last_error = 0;
xfs_agnumber_t ag;
ag = 0;
while ((pag = xfs_perag_get(mp, ag))) {
ag = pag->pag_agno + 1;
error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
iter_flags);
xfs_perag_put(pag);
if (error) {
last_error = error;
if (error == -EFSCORRUPTED)
break;
}
}
return last_error;
}
/*
 * Convenience wrapper: walk all AGs with no extra iteration flags.
 */
int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args)
{
	return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
}
int
xfs_inode_ag_iterator_tag(
struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags,
void *args),
int flags,
void *args,
int tag)
{
struct xfs_perag *pag;
int error = 0;
int last_error = 0;
xfs_agnumber_t ag;
ag = 0;
while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
ag = pag->pag_agno + 1;
error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
0);
xfs_perag_put(pag);
if (error) {
last_error = error;
if (error == -EFSCORRUPTED)
break;
}
}
return last_error;
}
/*
* Grab the inode for reclaim exclusively.
* Return 0 if we grabbed it, non-zero otherwise.
*/
/*
 * Returns 0 with XFS_IRECLAIM set (we own the reclaim of this inode),
 * non-zero when the inode is stale, already being reclaimed, or not a
 * reclaim candidate.  Called under rcu_read_lock().
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check the
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}
/*
* Inodes in different states need to be treated differently. The following
* table lists the inode states and the reclaim actions necessary:
*
* inode state iflush ret required action
* --------------- ---------- ---------------
* bad - reclaim
* shutdown EIO unpin and reclaim
* clean, unpinned 0 reclaim
* stale, unpinned 0 reclaim
* clean, pinned(*) 0 requeue
* stale, pinned EAGAIN requeue
* dirty, async - requeue
* dirty, sync 0 reclaim
*
* (*) dgc: I don't think the clean, pinned state is possible but it gets
* handled anyway given the order of checks implemented.
*
* Also, because we get the flush lock first, we know that any inode that has
* been flushed delwri has had the flush completed by the time we check that
* the inode is clean.
*
* Note that because the inode is flushed delayed write by AIL pushing, the
* flush lock may already be held here and waiting on it can result in very
* long latencies. Hence for sync reclaims, where we wait on the flush lock,
* the caller should push the AIL first before trying to reclaim inodes to
* minimise the amount of time spent waiting. For background relaim, we only
* bother to reclaim clean inodes anyway.
*
* Hence the order of actions after gaining the locks should be:
* bad => reclaim
* shutdown => unpin and reclaim
* pinned, async => requeue
* pinned, sync => unpin
* stale => reclaim
* clean => reclaim
* dirty, async => requeue
* dirty, sync => flush, wait and reclaim
*/
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		/* only SYNC_WAIT reclaim may block on the flush lock */
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		/* xfs_iflush_abort() drops the flush lock */
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto reclaim;
	}

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here,  just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == -EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

reclaim:
	/* the inode must be clean and unlocked (flush lock) by now */
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_reclaim_tag(pag);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	__xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	/* give up our claim; another pass will try again */
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return -EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}
/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk will leak all
 * the unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			/* skip contended AGs; resume from the saved cursor */
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. if we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that lead us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != -EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		/* save the cursor so a later trylock pass can resume here */
		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * if we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensure that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return last_error;
}
/*
 * Reclaim every reclaimable in-core inode, with no scan budget.
 */
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		budget = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &budget);
}
/*
* Scan a certain number of inodes for reclaim.
*
* When called we make sure that there is a background (fast) inode reclaim in
* progress, while we will throttle the speed of reclaim via doing synchronous
* reclaim of inodes. That means if we come across dirty inodes, we wait for
* them to be cleaned, which we hope will not be very long due to the
* background walker having already kicked the IO off on those dirty inodes.
*/
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	int			budget = nr_to_scan;

	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	/* synchronous, non-blocking-lock scan of up to @nr_to_scan inodes */
	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &budget);
}
/*
* Return the number of reclaimable inodes in the filesystem for
* the shrinker to determine how much to reclaim.
*/
/*
 * Sum the reclaimable-inode counts of all tagged AGs so the shrinker
 * knows how much work is available.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = 0;
	int			count = 0;

	while ((pag = xfs_perag_get_tag(mp, agno, XFS_ICI_RECLAIM_TAG))) {
		agno = pag->pag_agno + 1;
		count += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return count;
}
STATIC int
xfs_inode_match_id(
struct xfs_inode *ip,
struct xfs_eofblocks *eofb)
{
if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
!uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
return 0;
if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
!gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
return 0;
if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
xfs_get_projid(ip) != eofb->eof_prid)
return 0;
return 1;
}
/*
* A union-based inode filtering algorithm. Process the inode if any of the
* criteria match. This is for global/internal scans only.
*/
STATIC int
xfs_inode_match_id_union(
struct xfs_inode *ip,
struct xfs_eofblocks *eofb)
{
if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
return 1;
if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
return 1;
if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
xfs_get_projid(ip) == eofb->eof_prid)
return 1;
return 0;
}
/*
 * Per-inode callback to trim post-EOF preallocated blocks.  @args is an
 * optional struct xfs_eofblocks filter.  Returns 0 on success or when
 * the inode is skipped, -EAGAIN when the IOLOCK could not be taken in a
 * SYNC_WAIT scan, or the error from xfs_free_eofblocks().
 */
STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int ret = 0;
	struct xfs_eofblocks *eofb = args;
	int match;

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (flags & SYNC_WAIT)
			ret = -EAGAIN;
		return ret;
	}
	ret = xfs_free_eofblocks(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}
/*
 * Common driver for the EOF/CoW preallocation scanners: pick blocking
 * semantics from the filter's SYNC flag and run the tagged AG iterator.
 */
static int
__xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			tag)
{
	int flags;

	flags = (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC)) ?
			SYNC_WAIT : SYNC_TRYLOCK;
	return xfs_inode_ag_iterator_tag(mp, execute, flags, eofb, tag);
}
/*
 * Scan all EOFBLOCKS-tagged inodes and trim their post-EOF
 * preallocations, subject to the optional @eofb filter.
 */
int
xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}
/*
* Run eofblocks scans on the quotas applicable to the inode. For inodes with
* multiple quotas, we don't know exactly which quota caused an allocation
* failure. We make a best effort by including each quota under low free space
* conditions (less than 1% free space) in the scan.
*/
/*
 * Run the preallocation scanner @execute against every quota applicable
 * to @ip that is low on free space.  A single sync, union-filtered scan
 * covers all matching quotas at once.  Returns 1 if a scan ran, else 0.
 */
static int
__xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip,
	int			(*execute)(struct xfs_mount *mp,
					   struct xfs_eofblocks	*eofb))
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_eofblocks	eofb = {0};
	struct xfs_dquot	*dquot;
	int			scan = 0;

	/* sync scan + union filter: one pass covers every low quota */
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(mp)) {
		dquot = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dquot && xfs_dquot_lowsp(dquot)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(mp)) {
		dquot = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dquot && xfs_dquot_lowsp(dquot)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		execute(mp, &eofb);

	return scan;
}
/*
 * Trim post-EOF preallocations for all low-space quotas covering @ip.
 */
int
xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
}
/*
 * Map a per-AG radix tree tag to the corresponding in-core inode flag.
 * Unknown tags assert and map to 0.
 */
static inline unsigned long
xfs_iflag_for_tag(
	int		tag)
{
	if (tag == XFS_ICI_EOFBLOCKS_TAG)
		return XFS_IEOFBLOCKS;
	if (tag == XFS_ICI_COWBLOCKS_TAG)
		return XFS_ICOWBLOCKS;
	ASSERT(0);
	return 0;
}
/*
 * Tag @ip in its per-AG radix tree as carrying speculative
 * preallocations (EOF or CoW blocks, selected by @tag).  When the AG
 * becomes newly tagged, propagate the tag to the per-mount perag tree
 * and kick off the background trimming worker via @execute, tracing the
 * transition through @set_tp.
 */
static void
__xfs_inode_set_blocks_tag(
	xfs_inode_t	*ip,
	void		(*execute)(struct xfs_mount *mp),
	void		(*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
				  int error, unsigned long caller_ip),
	int		tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & xfs_iflag_for_tag(tag))
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= xfs_iflag_for_tag(tag);
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	/* remember whether the AG was tagged before this insertion */
	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   tag);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		execute(ip->i_mount);

		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
/*
 * Mark @ip as carrying post-EOF speculative preallocation.
 */
void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);

	return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
			trace_xfs_perag_set_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
}
/*
 * Clear the speculative-preallocation tag (selected by @tag) for @ip in
 * its per-AG radix tree.  When the AG loses its last tagged inode,
 * clear the tag from the per-mount perag tree as well and trace the
 * transition through @clear_tp.
 */
static void
__xfs_inode_clear_blocks_tag(
	xfs_inode_t	*ip,
	void		(*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
				    int error, unsigned long caller_ip),
	int		tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~xfs_iflag_for_tag(tag);
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     tag);
		spin_unlock(&ip->i_mount->m_perag_lock);
		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
/*
 * Drop the post-EOF speculative preallocation tag from @ip.
 */
void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);

	return __xfs_inode_clear_blocks_tag(ip,
			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
}
/*
* Set ourselves up to free CoW blocks from this file. If it's already clean
* then we can bail out quickly, but otherwise we must back off if the file
* is undergoing some kind of write.
*/
static bool
xfs_prep_free_cowblocks(
struct xfs_inode *ip,
struct xfs_ifork *ifp)
{
/*
* Just clear the tag if we have an empty cow fork or none at all. It's
* possible the inode was fully unshared since it was originally tagged.
*/
if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
trace_xfs_inode_free_cowblocks_invalid(ip);
xfs_inode_clear_cowblocks_tag(ip);
return false;
}
/*
* If the mapping is dirty or under writeback we cannot touch the
* CoW fork. Leave it alone if we're in the midst of a directio.
*/
if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
atomic_read(&VFS_I(ip)->i_dio_count))
return false;
return true;
}
/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long. If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	struct xfs_eofblocks	*eofb = args;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	int			match;
	int			ret = 0;

	/* Cheap unlocked pre-check; re-validated under lock below. */
	if (!xfs_prep_free_cowblocks(ip, ifp))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	/* Free the CoW blocks */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);

	/*
	 * Fix: re-sample the CoW fork pointer now that the locks are held.
	 * The fork may have been reallocated or torn down since the unlocked
	 * check above; reusing the stale @ifp risked a NULL/dangling pointer
	 * dereference inside xfs_prep_free_cowblocks (CWE-476).
	 */
	ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip, ifp))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}
/* Walk the inode cache and garbage collect CoW reservations on tagged inodes. */
int
xfs_icache_free_cowblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb,
					   xfs_inode_free_cowblocks,
					   XFS_ICI_COWBLOCKS_TAG);
}
/* Try to reclaim CoW reservations for @ip when a quota limit is hit. */
int
xfs_inode_free_quota_cowblocks(
	struct xfs_inode	*ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
}
/* Tag @ip as holding leftover CoW reservations eligible for reclaim. */
void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	__xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
				   trace_xfs_perag_set_cowblocks,
				   XFS_ICI_COWBLOCKS_TAG);
}
/* Remove the CoW-blocks reclaim tag from @ip. */
void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	__xfs_inode_clear_blocks_tag(ip, trace_xfs_perag_clear_cowblocks,
				     XFS_ICI_COWBLOCKS_TAG);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_212_0 |
crossvul-cpp_data_bad_3067_0 | /*
* Copyright 1995-2016 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the OpenSSL license (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
/* ====================================================================
* Copyright 2002 Sun Microsystems, Inc. ALL RIGHTS RESERVED.
*
* Portions of the attached software ("Contribution") are developed by
* SUN MICROSYSTEMS, INC., and are contributed to the OpenSSL project.
*
* The Contribution is licensed pursuant to the OpenSSL open source
* license provided above.
*
* ECC cipher suite support in OpenSSL originally written by
* Vipul Gupta and Sumit Gupta of Sun Microsystems Laboratories.
*
*/
/* ====================================================================
* Copyright 2005 Nokia. All rights reserved.
*
* The portions of the attached software ("Contribution") is developed by
* Nokia Corporation and is licensed pursuant to the OpenSSL open source
* license.
*
* The Contribution, originally written by Mika Kousa and Pasi Eronen of
* Nokia Corporation, consists of the "PSK" (Pre-Shared Key) ciphersuites
* support (see RFC 4279) to OpenSSL.
*
* No patent licenses or other rights except those expressly stated in
* the OpenSSL open source license shall be deemed granted or received
* expressly, by implication, estoppel, or otherwise.
*
* No assurances are provided by Nokia that the Contribution does not
* infringe the patent or other intellectual property rights of any third
* party or that the license provides you with all the necessary rights
* to make use of the Contribution.
*
* THE SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND. IN
* ADDITION TO THE DISCLAIMERS INCLUDED IN THE LICENSE, NOKIA
* SPECIFICALLY DISCLAIMS ANY LIABILITY FOR CLAIMS BROUGHT BY YOU OR ANY
* OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL PROPERTY RIGHTS OR
* OTHERWISE.
*/
#include <stdio.h>
#include "../ssl_locl.h"
#include "statem_locl.h"
#include <openssl/buffer.h>
#include <openssl/rand.h>
#include <openssl/objects.h>
#include <openssl/evp.h>
#include <openssl/md5.h>
#include <openssl/dh.h>
#include <openssl/bn.h>
#include <openssl/engine.h>
static ossl_inline int cert_req_allowed(SSL *s);
static int key_exchange_expected(SSL *s);
static int ca_dn_cmp(const X509_NAME *const *a, const X509_NAME *const *b);
static int ssl_cipher_list_to_bytes(SSL *s, STACK_OF(SSL_CIPHER) *sk,
unsigned char *p);
/*
 * Is a CertificateRequest message allowed at the moment or not?
 *
 * Return values are:
 *  1: Yes
 *  0: No
 */
static ossl_inline int cert_req_allowed(SSL *s)
{
    unsigned long auth = s->s3->tmp.new_cipher->algorithm_auth;

    /* Never with SRP or PSK key exchange */
    if (auth & (SSL_aSRP | SSL_aPSK))
        return 0;

    /* TLS does not like anon-DH with client cert */
    if (s->version > SSL3_VERSION && (auth & SSL_aNULL))
        return 0;

    return 1;
}
/*
 * Should we expect the ServerKeyExchange message or not?
 *
 * Return values are:
 *  1: Yes
 *  0: No
 */
static int key_exchange_expected(SSL *s)
{
    /*
     * Ephemeral (EC)DH ciphersuites and SRP always require a server key
     * exchange message; everything else can skip it.
     */
    return (s->s3->tmp.new_cipher->algorithm_mkey
            & (SSL_kDHE | SSL_kECDHE | SSL_kDHEPSK | SSL_kECDHEPSK
               | SSL_kSRP)) ? 1 : 0;
}
/*
 * ossl_statem_client_read_transition() encapsulates the logic for the allowed
 * handshake state transitions when the client is reading messages from the
 * server. The message type that the server has sent is provided in |mt|. The
 * current state is in |s->statem.hand_state|.
 *
 * On success the next state is stored into |st->hand_state|; on failure a
 * fatal unexpected_message alert is sent and 0 is returned.
 *
 * Return values are:
 *  1: Success (transition allowed)
 *  0: Error (transition not allowed)
 */
int ossl_statem_client_read_transition(SSL *s, int mt)
{
    OSSL_STATEM *st = &s->statem;
    int ske_expected;

    switch (st->hand_state) {
    case TLS_ST_CW_CLNT_HELLO:
        /* After ClientHello: ServerHello, or HelloVerifyRequest (DTLS only) */
        if (mt == SSL3_MT_SERVER_HELLO) {
            st->hand_state = TLS_ST_CR_SRVR_HELLO;
            return 1;
        }

        if (SSL_IS_DTLS(s)) {
            if (mt == DTLS1_MT_HELLO_VERIFY_REQUEST) {
                st->hand_state = DTLS_ST_CR_HELLO_VERIFY_REQUEST;
                return 1;
            }
        }
        break;

    case TLS_ST_CR_SRVR_HELLO:
        if (s->hit) {
            /* Resumption: NewSessionTicket (if announced) or CCS next */
            if (s->tlsext_ticket_expected) {
                if (mt == SSL3_MT_NEWSESSION_TICKET) {
                    st->hand_state = TLS_ST_CR_SESSION_TICKET;
                    return 1;
                }
            } else if (mt == SSL3_MT_CHANGE_CIPHER_SPEC) {
                st->hand_state = TLS_ST_CR_CHANGE;
                return 1;
            }
        } else {
            if (SSL_IS_DTLS(s) && mt == DTLS1_MT_HELLO_VERIFY_REQUEST) {
                st->hand_state = DTLS_ST_CR_HELLO_VERIFY_REQUEST;
                return 1;
            } else if (s->version >= TLS1_VERSION
                       && s->tls_session_secret_cb != NULL
                       && s->session->tlsext_tick != NULL
                       && mt == SSL3_MT_CHANGE_CIPHER_SPEC) {
                /*
                 * Normally, we can tell if the server is resuming the session
                 * from the session ID. EAP-FAST (RFC 4851), however, relies on
                 * the next server message after the ServerHello to determine if
                 * the server is resuming.
                 */
                s->hit = 1;
                st->hand_state = TLS_ST_CR_CHANGE;
                return 1;
            } else if (!(s->s3->tmp.new_cipher->algorithm_auth
                         & (SSL_aNULL | SSL_aSRP | SSL_aPSK))) {
                /* Authenticated (certificate-based) suite: expect Certificate */
                if (mt == SSL3_MT_CERTIFICATE) {
                    st->hand_state = TLS_ST_CR_CERT;
                    return 1;
                }
            } else {
                /* Anonymous/SRP/PSK suite: no server Certificate */
                ske_expected = key_exchange_expected(s);
                /* SKE is optional for some PSK ciphersuites */
                if (ske_expected
                    || ((s->s3->tmp.new_cipher->algorithm_mkey & SSL_PSK)
                        && mt == SSL3_MT_SERVER_KEY_EXCHANGE)) {
                    if (mt == SSL3_MT_SERVER_KEY_EXCHANGE) {
                        st->hand_state = TLS_ST_CR_KEY_EXCH;
                        return 1;
                    }
                } else if (mt == SSL3_MT_CERTIFICATE_REQUEST
                           && cert_req_allowed(s)) {
                    st->hand_state = TLS_ST_CR_CERT_REQ;
                    return 1;
                } else if (mt == SSL3_MT_SERVER_DONE) {
                    st->hand_state = TLS_ST_CR_SRVR_DONE;
                    return 1;
                }
            }
        }
        break;

    case TLS_ST_CR_CERT:
        /*
         * The CertificateStatus message is optional even if
         * |tlsext_status_expected| is set
         */
        if (s->tlsext_status_expected && mt == SSL3_MT_CERTIFICATE_STATUS) {
            st->hand_state = TLS_ST_CR_CERT_STATUS;
            return 1;
        }
        /* Fall through */

    case TLS_ST_CR_CERT_STATUS:
        ske_expected = key_exchange_expected(s);
        /* SKE is optional for some PSK ciphersuites */
        if (ske_expected || ((s->s3->tmp.new_cipher->algorithm_mkey & SSL_PSK)
                             && mt == SSL3_MT_SERVER_KEY_EXCHANGE)) {
            if (mt == SSL3_MT_SERVER_KEY_EXCHANGE) {
                st->hand_state = TLS_ST_CR_KEY_EXCH;
                return 1;
            }
            /* SKE was mandatory but something else arrived */
            goto err;
        }
        /* Fall through */

    case TLS_ST_CR_KEY_EXCH:
        if (mt == SSL3_MT_CERTIFICATE_REQUEST) {
            if (cert_req_allowed(s)) {
                st->hand_state = TLS_ST_CR_CERT_REQ;
                return 1;
            }
            goto err;
        }
        /* Fall through */

    case TLS_ST_CR_CERT_REQ:
        if (mt == SSL3_MT_SERVER_DONE) {
            st->hand_state = TLS_ST_CR_SRVR_DONE;
            return 1;
        }
        break;

    case TLS_ST_CW_FINISHED:
        /* Full handshake: server's NewSessionTicket/CCS/Finished follow ours */
        if (s->tlsext_ticket_expected) {
            if (mt == SSL3_MT_NEWSESSION_TICKET) {
                st->hand_state = TLS_ST_CR_SESSION_TICKET;
                return 1;
            }
        } else if (mt == SSL3_MT_CHANGE_CIPHER_SPEC) {
            st->hand_state = TLS_ST_CR_CHANGE;
            return 1;
        }
        break;

    case TLS_ST_CR_SESSION_TICKET:
        if (mt == SSL3_MT_CHANGE_CIPHER_SPEC) {
            st->hand_state = TLS_ST_CR_CHANGE;
            return 1;
        }
        break;

    case TLS_ST_CR_CHANGE:
        if (mt == SSL3_MT_FINISHED) {
            st->hand_state = TLS_ST_CR_FINISHED;
            return 1;
        }
        break;

    default:
        break;
    }

 err:
    /* No valid transition found */
    ssl3_send_alert(s, SSL3_AL_FATAL, SSL3_AD_UNEXPECTED_MESSAGE);
    SSLerr(SSL_F_OSSL_STATEM_CLIENT_READ_TRANSITION, SSL_R_UNEXPECTED_MESSAGE);
    return 0;
}
/*
 * client_write_transition() works out what handshake state to move to next
 * when the client is writing messages to be sent to the server.
 *
 * Returns WRITE_TRAN_CONTINUE when there is another message to write,
 * WRITE_TRAN_FINISHED when the current flight is complete, or
 * WRITE_TRAN_ERROR on an impossible state.
 */
WRITE_TRAN ossl_statem_client_write_transition(SSL *s)
{
    OSSL_STATEM *st = &s->statem;

    switch (st->hand_state) {
    case TLS_ST_OK:
        /* Renegotiation - fall through */
    case TLS_ST_BEFORE:
        st->hand_state = TLS_ST_CW_CLNT_HELLO;
        return WRITE_TRAN_CONTINUE;

    case TLS_ST_CW_CLNT_HELLO:
        /*
         * No transition at the end of writing because we don't know what
         * we will be sent
         */
        return WRITE_TRAN_FINISHED;

    case DTLS_ST_CR_HELLO_VERIFY_REQUEST:
        /* DTLS: resend ClientHello, now carrying the server's cookie */
        st->hand_state = TLS_ST_CW_CLNT_HELLO;
        return WRITE_TRAN_CONTINUE;

    case TLS_ST_CR_SRVR_DONE:
        /* Client Certificate only if the server requested one */
        if (s->s3->tmp.cert_req)
            st->hand_state = TLS_ST_CW_CERT;
        else
            st->hand_state = TLS_ST_CW_KEY_EXCH;
        return WRITE_TRAN_CONTINUE;

    case TLS_ST_CW_CERT:
        st->hand_state = TLS_ST_CW_KEY_EXCH;
        return WRITE_TRAN_CONTINUE;

    case TLS_ST_CW_KEY_EXCH:
        /*
         * For TLS, cert_req is set to 2, so a cert chain of nothing is
         * sent, but no verify packet is sent
         */
        /*
         * XXX: For now, we do not support client authentication in ECDH
         * cipher suites with ECDH (rather than ECDSA) certificates. We
         * need to skip the certificate verify message when client's
         * ECDH public key is sent inside the client certificate.
         */
        if (s->s3->tmp.cert_req == 1) {
            st->hand_state = TLS_ST_CW_CERT_VRFY;
        } else {
            st->hand_state = TLS_ST_CW_CHANGE;
        }
        /* Flag override: skip CertificateVerify regardless of cert_req */
        if (s->s3->flags & TLS1_FLAGS_SKIP_CERT_VERIFY) {
            st->hand_state = TLS_ST_CW_CHANGE;
        }
        return WRITE_TRAN_CONTINUE;

    case TLS_ST_CW_CERT_VRFY:
        st->hand_state = TLS_ST_CW_CHANGE;
        return WRITE_TRAN_CONTINUE;

    case TLS_ST_CW_CHANGE:
#if defined(OPENSSL_NO_NEXTPROTONEG)
        st->hand_state = TLS_ST_CW_FINISHED;
#else
        /* NPN message (TLS only) goes between CCS and Finished */
        if (!SSL_IS_DTLS(s) && s->s3->next_proto_neg_seen)
            st->hand_state = TLS_ST_CW_NEXT_PROTO;
        else
            st->hand_state = TLS_ST_CW_FINISHED;
#endif
        return WRITE_TRAN_CONTINUE;

#if !defined(OPENSSL_NO_NEXTPROTONEG)
    case TLS_ST_CW_NEXT_PROTO:
        st->hand_state = TLS_ST_CW_FINISHED;
        return WRITE_TRAN_CONTINUE;
#endif

    case TLS_ST_CW_FINISHED:
        /* On resumption our Finished is the final flight */
        if (s->hit) {
            st->hand_state = TLS_ST_OK;
            ossl_statem_set_in_init(s, 0);
            return WRITE_TRAN_CONTINUE;
        } else {
            return WRITE_TRAN_FINISHED;
        }

    case TLS_ST_CR_FINISHED:
        /* Full handshake: we still owe CCS+Finished; resumption: done */
        if (s->hit) {
            st->hand_state = TLS_ST_CW_CHANGE;
            return WRITE_TRAN_CONTINUE;
        } else {
            st->hand_state = TLS_ST_OK;
            ossl_statem_set_in_init(s, 0);
            return WRITE_TRAN_CONTINUE;
        }

    default:
        /* Shouldn't happen */
        return WRITE_TRAN_ERROR;
    }
}
/*
 * Perform any pre work that needs to be done prior to sending a message from
 * the client to the server.
 *
 * Returns a WORK_STATE value; WORK_ERROR aborts the handshake.
 */
WORK_STATE ossl_statem_client_pre_work(SSL *s, WORK_STATE wst)
{
    OSSL_STATEM *st = &s->statem;

    switch (st->hand_state) {
    case TLS_ST_CW_CLNT_HELLO:
        /* (Re)starting a handshake: clear any previous shutdown state */
        s->shutdown = 0;
        if (SSL_IS_DTLS(s)) {
            /* every DTLS ClientHello resets Finished MAC */
            if (!ssl3_init_finished_mac(s)) {
                ossl_statem_set_error(s);
                return WORK_ERROR;
            }
        }
        break;

    case TLS_ST_CW_CHANGE:
        if (SSL_IS_DTLS(s)) {
            if (s->hit) {
                /*
                 * We're into the last flight so we don't retransmit these
                 * messages unless we need to.
                 */
                st->use_timer = 0;
            }
#ifndef OPENSSL_NO_SCTP
            /* SCTP requires the association to be "dry" before key change */
            if (BIO_dgram_is_sctp(SSL_get_wbio(s)))
                return dtls_wait_for_dry(s);
#endif
        }
        return WORK_FINISHED_CONTINUE;

    case TLS_ST_OK:
        /* Handshake complete: release buffers / finalise state */
        return tls_finish_handshake(s, wst);

    default:
        /* No pre work to be done */
        break;
    }

    return WORK_FINISHED_CONTINUE;
}
/*
 * Perform any work that needs to be done after sending a message from the
 * client to the server.
 *
 * Returns a WORK_STATE value; WORK_MORE_* requests another call, WORK_ERROR
 * aborts the handshake.
 */
WORK_STATE ossl_statem_client_post_work(SSL *s, WORK_STATE wst)
{
    OSSL_STATEM *st = &s->statem;

    /* Message has been sent; reset the outgoing message byte count */
    s->init_num = 0;

    switch (st->hand_state) {
    case TLS_ST_CW_CLNT_HELLO:
        if (wst == WORK_MORE_A && statem_flush(s) != 1)
            return WORK_MORE_A;

        if (SSL_IS_DTLS(s)) {
            /* Treat the next message as the first packet */
            s->first_packet = 1;
        }
        break;

    case TLS_ST_CW_KEY_EXCH:
        /* Derive the master secret etc. from the premaster secret */
        if (tls_client_key_exchange_post_work(s) == 0)
            return WORK_ERROR;
        break;

    case TLS_ST_CW_CHANGE:
        /* CCS sent: record the negotiated parameters and switch write keys */
        s->session->cipher = s->s3->tmp.new_cipher;
#ifdef OPENSSL_NO_COMP
        s->session->compress_meth = 0;
#else
        if (s->s3->tmp.new_compression == NULL)
            s->session->compress_meth = 0;
        else
            s->session->compress_meth = s->s3->tmp.new_compression->id;
#endif
        if (!s->method->ssl3_enc->setup_key_block(s))
            return WORK_ERROR;

        if (!s->method->ssl3_enc->change_cipher_state(s,
                                                      SSL3_CHANGE_CIPHER_CLIENT_WRITE))
            return WORK_ERROR;

        if (SSL_IS_DTLS(s)) {
#ifndef OPENSSL_NO_SCTP
            if (s->hit) {
                /*
                 * Change to new shared key of SCTP-Auth, will be ignored if
                 * no SCTP used.
                 */
                BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_NEXT_AUTH_KEY,
                         0, NULL);
            }
#endif
            /* New epoch: restart the DTLS write sequence numbers */
            dtls1_reset_seq_numbers(s, SSL3_CC_WRITE);
        }
        break;

    case TLS_ST_CW_FINISHED:
#ifndef OPENSSL_NO_SCTP
        if (wst == WORK_MORE_A && SSL_IS_DTLS(s) && s->hit == 0) {
            /*
             * Change to new shared key of SCTP-Auth, will be ignored if
             * no SCTP used.
             */
            BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_NEXT_AUTH_KEY,
                     0, NULL);
        }
#endif
        if (statem_flush(s) != 1)
            return WORK_MORE_B;
        break;

    default:
        /* No post work to be done */
        break;
    }

    return WORK_FINISHED_CONTINUE;
}
/*
 * Construct a message to be sent from the client to the server, dispatching
 * on the current write state.
 *
 * Valid return values are:
 *   1: Success
 *   0: Error
 */
int ossl_statem_client_construct_message(SSL *s)
{
    switch (s->statem.hand_state) {
    case TLS_ST_CW_CLNT_HELLO:
        return tls_construct_client_hello(s);

    case TLS_ST_CW_CERT:
        return tls_construct_client_certificate(s);

    case TLS_ST_CW_KEY_EXCH:
        return tls_construct_client_key_exchange(s);

    case TLS_ST_CW_CERT_VRFY:
        return tls_construct_client_verify(s);

    case TLS_ST_CW_CHANGE:
        /* CCS construction differs between the DTLS and TLS record layers */
        return SSL_IS_DTLS(s) ? dtls_construct_change_cipher_spec(s)
                              : tls_construct_change_cipher_spec(s);

#if !defined(OPENSSL_NO_NEXTPROTONEG)
    case TLS_ST_CW_NEXT_PROTO:
        return tls_construct_next_proto(s);
#endif

    case TLS_ST_CW_FINISHED:
        return tls_construct_finished(s,
                                      s->method->
                                      ssl3_enc->client_finished_label,
                                      s->method->
                                      ssl3_enc->client_finished_label_len);

    default:
        /* Shouldn't happen */
        return 0;
    }
}
/*
 * Returns the maximum allowed length for the current message that we are
 * reading. Excludes the message header. Returns 0 for unexpected states.
 */
unsigned long ossl_statem_client_max_message_size(SSL *s)
{
    switch (s->statem.hand_state) {
    case TLS_ST_CR_SRVR_HELLO:
        return SERVER_HELLO_MAX_LENGTH;

    case DTLS_ST_CR_HELLO_VERIFY_REQUEST:
        return HELLO_VERIFY_REQUEST_MAX_LENGTH;

    case TLS_ST_CR_CERT:
        return s->max_cert_list;

    case TLS_ST_CR_CERT_STATUS:
        return SSL3_RT_MAX_PLAIN_LENGTH;

    case TLS_ST_CR_KEY_EXCH:
        return SERVER_KEY_EXCH_MAX_LENGTH;

    case TLS_ST_CR_CERT_REQ:
        /*
         * Set to s->max_cert_list for compatibility with previous releases. In
         * practice these messages can get quite long if servers are configured
         * to provide a long list of acceptable CAs
         */
        return s->max_cert_list;

    case TLS_ST_CR_SRVR_DONE:
        return SERVER_HELLO_DONE_MAX_LENGTH;

    case TLS_ST_CR_CHANGE:
        /* The old DTLS1_BAD_VER wire format uses a 3 byte CCS */
        return (s->version == DTLS1_BAD_VER) ? 3 : CCS_MAX_LENGTH;

    case TLS_ST_CR_SESSION_TICKET:
        return SSL3_RT_MAX_PLAIN_LENGTH;

    case TLS_ST_CR_FINISHED:
        return FINISHED_MAX_LENGTH;

    default:
        /* Shouldn't happen */
        return 0;
    }
}
/*
 * Process a message that the client has received from the server, dispatching
 * on the current read state.
 */
MSG_PROCESS_RETURN ossl_statem_client_process_message(SSL *s, PACKET *pkt)
{
    switch (s->statem.hand_state) {
    case TLS_ST_CR_SRVR_HELLO:
        return tls_process_server_hello(s, pkt);

    case DTLS_ST_CR_HELLO_VERIFY_REQUEST:
        return dtls_process_hello_verify(s, pkt);

    case TLS_ST_CR_CERT:
        return tls_process_server_certificate(s, pkt);

    case TLS_ST_CR_CERT_STATUS:
        return tls_process_cert_status(s, pkt);

    case TLS_ST_CR_KEY_EXCH:
        return tls_process_key_exchange(s, pkt);

    case TLS_ST_CR_CERT_REQ:
        return tls_process_certificate_request(s, pkt);

    case TLS_ST_CR_SRVR_DONE:
        return tls_process_server_done(s, pkt);

    case TLS_ST_CR_CHANGE:
        return tls_process_change_cipher_spec(s, pkt);

    case TLS_ST_CR_SESSION_TICKET:
        return tls_process_new_session_ticket(s, pkt);

    case TLS_ST_CR_FINISHED:
        return tls_process_finished(s, pkt);

    default:
        /* Shouldn't happen */
        return MSG_PROCESS_ERROR;
    }
}
/*
 * Perform any further processing required following the receipt of a message
 * from the server
 */
WORK_STATE ossl_statem_client_post_process_message(SSL *s, WORK_STATE wst)
{
    OSSL_STATEM *st = &s->statem;

    switch (st->hand_state) {
    case TLS_ST_CR_CERT_REQ:
        /* Select (or query the app for) a client certificate to send */
        return tls_prepare_client_certificate(s, wst);

#ifndef OPENSSL_NO_SCTP
    case TLS_ST_CR_SRVR_DONE:
        /* We only get here if we are using SCTP and we are renegotiating */
        if (BIO_dgram_sctp_msg_waiting(SSL_get_rbio(s))) {
            /* Application data is pending; deliver it before continuing */
            s->s3->in_read_app_data = 2;
            s->rwstate = SSL_READING;
            BIO_clear_retry_flags(SSL_get_rbio(s));
            BIO_set_retry_read(SSL_get_rbio(s));
            ossl_statem_set_sctp_read_sock(s, 1);
            return WORK_MORE_A;
        }
        ossl_statem_set_sctp_read_sock(s, 0);
        return WORK_FINISHED_STOP;
#endif

    default:
        break;
    }

    /* Shouldn't happen */
    return WORK_ERROR;
}
/*
 * Build the ClientHello message into s->init_buf.
 *
 * Layout written (after the handshake header, which is filled in last):
 * client_version(2) | random(32) | session_id | [DTLS cookie] |
 * cipher list | compression methods | extensions.
 *
 * Returns 1 on success, 0 on error (state machine error flag is set).
 *
 * NOTE(review): only the extensions block is written with an explicit bound
 * (buf + SSL3_RT_MAX_PLAIN_LENGTH); the earlier fields rely on init_buf
 * having been sized adequately by the caller — confirm upstream sizing.
 */
int tls_construct_client_hello(SSL *s)
{
    unsigned char *buf;
    unsigned char *p, *d;          /* p = write cursor, d = body start */
    int i;
    int protverr;
    unsigned long l;
    int al = 0;
#ifndef OPENSSL_NO_COMP
    int j;
    SSL_COMP *comp;
#endif
    SSL_SESSION *sess = s->session;

    buf = (unsigned char *)s->init_buf->data;

    /* Work out what SSL/TLS/DTLS version to use */
    protverr = ssl_set_client_hello_version(s);
    if (protverr != 0) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_HELLO, protverr);
        goto err;
    }

    /* Create a fresh session unless the existing one is usable for resumption */
    if ((sess == NULL) || !ssl_version_supported(s, sess->ssl_version) ||
        /*
         * In the case of EAP-FAST, we can have a pre-shared
         * "ticket" without a session ID.
         */
        (!sess->session_id_length && !sess->tlsext_tick) ||
        (sess->not_resumable)) {
        if (!ssl_get_new_session(s, 0))
            goto err;
    }
    /* else use the pre-loaded session */

    p = s->s3->client_random;

    /*
     * for DTLS if client_random is initialized, reuse it, we are
     * required to use same upon reply to HelloVerify
     */
    if (SSL_IS_DTLS(s)) {
        size_t idx;
        /* i == 1 means "random is all zero, needs (re)filling" */
        i = 1;
        for (idx = 0; idx < sizeof(s->s3->client_random); idx++) {
            if (p[idx]) {
                i = 0;
                break;
            }
        }
    } else
        i = 1;

    if (i && ssl_fill_hello_random(s, 0, p, sizeof(s->s3->client_random)) <= 0)
        goto err;

    /* Do the message type and length last */
    d = p = ssl_handshake_start(s);

    /*-
     * version indicates the negotiated version: for example from
     * an SSLv2/v3 compatible client hello). The client_version
     * field is the maximum version we permit and it is also
     * used in RSA encrypted premaster secrets. Some servers can
     * choke if we initially report a higher version then
     * renegotiate to a lower one in the premaster secret. This
     * didn't happen with TLS 1.0 as most servers supported it
     * but it can with TLS 1.1 or later if the server only supports
     * 1.0.
     *
     * Possible scenario with previous logic:
     *      1. Client hello indicates TLS 1.2
     *      2. Server hello says TLS 1.0
     *      3. RSA encrypted premaster secret uses 1.2.
     *      4. Handshake proceeds using TLS 1.0.
     *      5. Server sends hello request to renegotiate.
     *      6. Client hello indicates TLS v1.0 as we now
     *         know that is maximum server supports.
     *      7. Server chokes on RSA encrypted premaster secret
     *         containing version 1.0.
     *
     * For interoperability it should be OK to always use the
     * maximum version we support in client hello and then rely
     * on the checking of version to ensure the servers isn't
     * being inconsistent: for example initially negotiating with
     * TLS 1.0 and renegotiating with TLS 1.2. We do this by using
     * client_version in client hello and not resetting it to
     * the negotiated version.
     */
    *(p++) = s->client_version >> 8;
    *(p++) = s->client_version & 0xff;

    /* Random stuff */
    memcpy(p, s->s3->client_random, SSL3_RANDOM_SIZE);
    p += SSL3_RANDOM_SIZE;

    /* Session ID */
    if (s->new_session)
        i = 0;
    else
        i = s->session->session_id_length;
    *(p++) = i;
    if (i != 0) {
        /* Sanity check: length byte must match the stored id capacity */
        if (i > (int)sizeof(s->session->session_id)) {
            SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
            goto err;
        }
        memcpy(p, s->session->session_id, i);
        p += i;
    }

    /* cookie stuff for DTLS */
    if (SSL_IS_DTLS(s)) {
        if (s->d1->cookie_len > sizeof(s->d1->cookie)) {
            SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
            goto err;
        }
        *(p++) = s->d1->cookie_len;
        memcpy(p, s->d1->cookie, s->d1->cookie_len);
        p += s->d1->cookie_len;
    }

    /* Ciphers supported: written at p+2, the 2-byte length is filled below */
    i = ssl_cipher_list_to_bytes(s, SSL_get_ciphers(s), &(p[2]));
    if (i == 0) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_HELLO, SSL_R_NO_CIPHERS_AVAILABLE);
        goto err;
    }
#ifdef OPENSSL_MAX_TLS1_2_CIPHER_LENGTH
    /*
     * Some servers hang if client hello > 256 bytes as hack workaround
     * chop number of supported ciphers to keep it well below this if we
     * use TLS v1.2
     */
    if (TLS1_get_version(s) >= TLS1_2_VERSION
        && i > OPENSSL_MAX_TLS1_2_CIPHER_LENGTH)
        i = OPENSSL_MAX_TLS1_2_CIPHER_LENGTH & ~1;
#endif
    s2n(i, p);
    p += i;

    /* COMPRESSION */
#ifdef OPENSSL_NO_COMP
    *(p++) = 1;
#else
    if (!ssl_allow_compression(s) || !s->ctx->comp_methods)
        j = 0;
    else
        j = sk_SSL_COMP_num(s->ctx->comp_methods);
    *(p++) = 1 + j;
    for (i = 0; i < j; i++) {
        comp = sk_SSL_COMP_value(s->ctx->comp_methods, i);
        *(p++) = comp->id;
    }
#endif
    *(p++) = 0;                 /* Add the NULL method */

    /* TLS extensions */
    if (ssl_prepare_clienthello_tlsext(s) <= 0) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_HELLO, SSL_R_CLIENTHELLO_TLSEXT);
        goto err;
    }
    if ((p =
         ssl_add_clienthello_tlsext(s, p, buf + SSL3_RT_MAX_PLAIN_LENGTH,
                                    &al)) == NULL) {
        ssl3_send_alert(s, SSL3_AL_FATAL, al);
        SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
        goto err;
    }

    /* Now we know the body length, write the handshake header */
    l = p - d;
    if (!ssl_set_handshake_header(s, SSL3_MT_CLIENT_HELLO, l)) {
        ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
        SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_HELLO, ERR_R_INTERNAL_ERROR);
        goto err;
    }

    return 1;
 err:
    ossl_statem_set_error(s);
    return 0;
}
/*
 * Parse a DTLS HelloVerifyRequest: skip the server_version field and stash
 * the cookie into s->d1 so the retried ClientHello can echo it.
 */
MSG_PROCESS_RETURN dtls_process_hello_verify(SSL *s, PACKET *pkt)
{
    PACKET cookie;
    unsigned int clen;
    int alert;

    /* 2 bytes of server_version, then a 1-byte-length-prefixed cookie */
    if (!PACKET_forward(pkt, 2)
            || !PACKET_get_length_prefixed_1(pkt, &cookie)) {
        SSLerr(SSL_F_DTLS_PROCESS_HELLO_VERIFY, SSL_R_LENGTH_MISMATCH);
        alert = SSL_AD_DECODE_ERROR;
        goto f_err;
    }

    clen = PACKET_remaining(&cookie);
    if (clen > sizeof(s->d1->cookie)) {
        /* Cookie would overflow our fixed-size storage */
        SSLerr(SSL_F_DTLS_PROCESS_HELLO_VERIFY, SSL_R_LENGTH_TOO_LONG);
        alert = SSL_AD_ILLEGAL_PARAMETER;
        goto f_err;
    }

    if (!PACKET_copy_bytes(&cookie, s->d1->cookie, clen)) {
        SSLerr(SSL_F_DTLS_PROCESS_HELLO_VERIFY, SSL_R_LENGTH_MISMATCH);
        alert = SSL_AD_DECODE_ERROR;
        goto f_err;
    }
    s->d1->cookie_len = clen;

    return MSG_PROCESS_FINISHED_READING;
 f_err:
    ssl3_send_alert(s, SSL3_AL_FATAL, alert);
    ossl_statem_set_error(s);
    return MSG_PROCESS_ERROR;
}
/*
 * Parse the ServerHello: negotiate the protocol version, capture the server
 * random, decide whether this is a resumed session, validate the selected
 * cipher and compression method, and process the extensions block.
 *
 * Returns MSG_PROCESS_CONTINUE_READING on success, MSG_PROCESS_ERROR on
 * failure (a fatal alert is sent for f_err paths).
 */
MSG_PROCESS_RETURN tls_process_server_hello(SSL *s, PACKET *pkt)
{
    STACK_OF(SSL_CIPHER) *sk;
    const SSL_CIPHER *c;
    PACKET session_id;
    size_t session_id_len;
    const unsigned char *cipherchars;
    /*
     * al defaults to internal_error; error paths that do not set it
     * explicitly (e.g. the INCONSISTENT_COMPRESSION case below) rely on
     * this default.
     */
    int i, al = SSL_AD_INTERNAL_ERROR;
    unsigned int compression;
    unsigned int sversion;
    int protverr;
#ifndef OPENSSL_NO_COMP
    SSL_COMP *comp;
#endif

    if (!PACKET_get_net_2(pkt, &sversion)) {
        al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_LENGTH_MISMATCH);
        goto f_err;
    }

    /* Validate the server's chosen version against what we offered */
    protverr = ssl_choose_client_version(s, sversion);
    if (protverr != 0) {
        al = SSL_AD_PROTOCOL_VERSION;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, protverr);
        goto f_err;
    }

    /* load the server hello data */
    /* load the server random */
    if (!PACKET_copy_bytes(pkt, s->s3->server_random, SSL3_RANDOM_SIZE)) {
        al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_LENGTH_MISMATCH);
        goto f_err;
    }

    s->hit = 0;

    /* Get the session-id. */
    if (!PACKET_get_length_prefixed_1(pkt, &session_id)) {
        al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_LENGTH_MISMATCH);
        goto f_err;
    }
    session_id_len = PACKET_remaining(&session_id);
    /* Bound the id against both our storage and the protocol maximum */
    if (session_id_len > sizeof s->session->session_id
        || session_id_len > SSL3_SESSION_ID_SIZE) {
        al = SSL_AD_ILLEGAL_PARAMETER;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_SSL3_SESSION_ID_TOO_LONG);
        goto f_err;
    }

    if (!PACKET_get_bytes(pkt, &cipherchars, TLS_CIPHER_LEN)) {
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_LENGTH_MISMATCH);
        al = SSL_AD_DECODE_ERROR;
        goto f_err;
    }

    /*
     * Check if we can resume the session based on external pre-shared secret.
     * EAP-FAST (RFC 4851) supports two types of session resumption.
     * Resumption based on server-side state works with session IDs.
     * Resumption based on pre-shared Protected Access Credentials (PACs)
     * works by overriding the SessionTicket extension at the application
     * layer, and does not send a session ID. (We do not know whether EAP-FAST
     * servers would honour the session ID.) Therefore, the session ID alone
     * is not a reliable indicator of session resumption, so we first check if
     * we can resume, and later peek at the next handshake message to see if the
     * server wants to resume.
     */
    if (s->version >= TLS1_VERSION && s->tls_session_secret_cb &&
        s->session->tlsext_tick) {
        const SSL_CIPHER *pref_cipher = NULL;
        s->session->master_key_length = sizeof(s->session->master_key);
        if (s->tls_session_secret_cb(s, s->session->master_key,
                                     &s->session->master_key_length,
                                     NULL, &pref_cipher,
                                     s->tls_session_secret_cb_arg)) {
            /*
             * NOTE(review): ssl_get_cipher_by_char() may return NULL here
             * for an unknown cipher; the later "if (s->session->cipher)"
             * guard tolerates that, and the main cipher lookup below
             * rejects unknown ciphers — confirm no other user of
             * s->session->cipher runs before that point.
             */
            s->session->cipher = pref_cipher ?
                pref_cipher : ssl_get_cipher_by_char(s, cipherchars);
        } else {
            SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, ERR_R_INTERNAL_ERROR);
            al = SSL_AD_INTERNAL_ERROR;
            goto f_err;
        }
    }

    /* Server echoed our session id => resumption */
    if (session_id_len != 0 && session_id_len == s->session->session_id_length
        && memcmp(PACKET_data(&session_id), s->session->session_id,
                  session_id_len) == 0) {
        if (s->sid_ctx_length != s->session->sid_ctx_length
            || memcmp(s->session->sid_ctx, s->sid_ctx, s->sid_ctx_length)) {
            /* actually a client application bug */
            al = SSL_AD_ILLEGAL_PARAMETER;
            SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO,
                   SSL_R_ATTEMPT_TO_REUSE_SESSION_IN_DIFFERENT_CONTEXT);
            goto f_err;
        }
        s->hit = 1;
    } else {
        /*
         * If we were trying for session-id reuse but the server
         * didn't echo the ID, make a new SSL_SESSION.
         * In the case of EAP-FAST and PAC, we do not send a session ID,
         * so the PAC-based session secret is always preserved. It'll be
         * overwritten if the server refuses resumption.
         */
        if (s->session->session_id_length > 0) {
            s->ctx->stats.sess_miss++;
            if (!ssl_get_new_session(s, 0)) {
                goto f_err;
            }
        }

        s->session->ssl_version = s->version;
        s->session->session_id_length = session_id_len;
        /* session_id_len could be 0 */
        memcpy(s->session->session_id, PACKET_data(&session_id),
               session_id_len);
    }

    /* Session version and negotiated protocol version should match */
    if (s->version != s->session->ssl_version) {
        al = SSL_AD_PROTOCOL_VERSION;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO,
               SSL_R_SSL_SESSION_VERSION_MISMATCH);
        goto f_err;
    }

    c = ssl_get_cipher_by_char(s, cipherchars);
    if (c == NULL) {
        /* unknown cipher */
        al = SSL_AD_ILLEGAL_PARAMETER;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_UNKNOWN_CIPHER_RETURNED);
        goto f_err;
    }
    /*
     * Now that we know the version, update the check to see if it's an allowed
     * version.
     */
    s->s3->tmp.min_ver = s->version;
    s->s3->tmp.max_ver = s->version;
    /*
     * If it is a disabled cipher we either didn't send it in client hello,
     * or it's not allowed for the selected protocol. So we return an error.
     */
    if (ssl_cipher_disabled(s, c, SSL_SECOP_CIPHER_CHECK)) {
        al = SSL_AD_ILLEGAL_PARAMETER;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_WRONG_CIPHER_RETURNED);
        goto f_err;
    }

    /* The chosen cipher must be one we actually offered */
    sk = ssl_get_ciphers_by_id(s);
    i = sk_SSL_CIPHER_find(sk, c);
    if (i < 0) {
        /* we did not say we would use this cipher */
        al = SSL_AD_ILLEGAL_PARAMETER;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_WRONG_CIPHER_RETURNED);
        goto f_err;
    }

    /*
     * Depending on the session caching (internal/external), the cipher
     * and/or cipher_id values may not be set. Make sure that cipher_id is
     * set and use it for comparison.
     */
    if (s->session->cipher)
        s->session->cipher_id = s->session->cipher->id;
    if (s->hit && (s->session->cipher_id != c->id)) {
        /* On resumption the server must return the original cipher */
        al = SSL_AD_ILLEGAL_PARAMETER;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO,
               SSL_R_OLD_SESSION_CIPHER_NOT_RETURNED);
        goto f_err;
    }
    s->s3->tmp.new_cipher = c;
    /* lets get the compression algorithm */
    /* COMPRESSION */
    if (!PACKET_get_1(pkt, &compression)) {
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_LENGTH_MISMATCH);
        al = SSL_AD_DECODE_ERROR;
        goto f_err;
    }
#ifdef OPENSSL_NO_COMP
    if (compression != 0) {
        al = SSL_AD_ILLEGAL_PARAMETER;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO,
               SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM);
        goto f_err;
    }
    /*
     * If compression is disabled we'd better not try to resume a session
     * using compression.
     */
    if (s->session->compress_meth != 0) {
        /* Uses the default internal_error alert set at declaration */
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_INCONSISTENT_COMPRESSION);
        goto f_err;
    }
#else
    if (s->hit && compression != s->session->compress_meth) {
        al = SSL_AD_ILLEGAL_PARAMETER;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO,
               SSL_R_OLD_SESSION_COMPRESSION_ALGORITHM_NOT_RETURNED);
        goto f_err;
    }
    if (compression == 0)
        comp = NULL;
    else if (!ssl_allow_compression(s)) {
        al = SSL_AD_ILLEGAL_PARAMETER;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_COMPRESSION_DISABLED);
        goto f_err;
    } else {
        comp = ssl3_comp_find(s->ctx->comp_methods, compression);
    }

    if (compression != 0 && comp == NULL) {
        al = SSL_AD_ILLEGAL_PARAMETER;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO,
               SSL_R_UNSUPPORTED_COMPRESSION_ALGORITHM);
        goto f_err;
    } else {
        s->s3->tmp.new_compression = comp;
    }
#endif

    /* TLS extensions */
    if (!ssl_parse_serverhello_tlsext(s, pkt)) {
        /* err (not f_err): the tlsext parser sends its own alert */
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_PARSE_TLSEXT);
        goto err;
    }

    if (PACKET_remaining(pkt) != 0) {
        /* wrong packet length */
        al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_HELLO, SSL_R_BAD_PACKET_LENGTH);
        goto f_err;
    }
#ifndef OPENSSL_NO_SCTP
    if (SSL_IS_DTLS(s) && s->hit) {
        unsigned char sctpauthkey[64];
        char labelbuffer[sizeof(DTLS1_SCTP_AUTH_LABEL)];

        /*
         * Add new shared key for SCTP-Auth, will be ignored if
         * no SCTP used.
         */
        memcpy(labelbuffer, DTLS1_SCTP_AUTH_LABEL,
               sizeof(DTLS1_SCTP_AUTH_LABEL));

        if (SSL_export_keying_material(s, sctpauthkey,
                                       sizeof(sctpauthkey),
                                       labelbuffer,
                                       sizeof(labelbuffer), NULL, 0, 0) <= 0)
            goto err;

        BIO_ctrl(SSL_get_wbio(s),
                 BIO_CTRL_DGRAM_SCTP_ADD_AUTH_KEY,
                 sizeof(sctpauthkey), sctpauthkey);
    }
#endif

    return MSG_PROCESS_CONTINUE_READING;
 f_err:
    ssl3_send_alert(s, SSL3_AL_FATAL, al);
 err:
    ossl_statem_set_error(s);
    return MSG_PROCESS_ERROR;
}
/*
 * Process the server's Certificate message: parse the 3-byte length-prefixed
 * certificate list, verify the chain, and record the leaf certificate, its
 * type and the verify result in the session.
 *
 * Returns MSG_PROCESS_CONTINUE_READING on success, or MSG_PROCESS_ERROR on
 * failure (sending a fatal alert first when |al| has been chosen).
 */
MSG_PROCESS_RETURN tls_process_server_certificate(SSL *s, PACKET *pkt)
{
    int al, i, ret = MSG_PROCESS_ERROR, exp_idx;
    unsigned long cert_list_len, cert_len;
    X509 *x = NULL;             /* certificate currently being parsed */
    const unsigned char *certstart, *certbytes;
    STACK_OF(X509) *sk = NULL;  /* received chain, leaf first */
    EVP_PKEY *pkey = NULL;

    if ((sk = sk_X509_new_null()) == NULL) {
        SSLerr(SSL_F_TLS_PROCESS_SERVER_CERTIFICATE, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    /* The outer 3-byte length must cover exactly the rest of the message */
    if (!PACKET_get_net_3(pkt, &cert_list_len)
            || PACKET_remaining(pkt) != cert_list_len) {
        al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_CERTIFICATE, SSL_R_LENGTH_MISMATCH);
        goto f_err;
    }
    /* Each entry is a 3-byte length followed by a DER certificate */
    while (PACKET_remaining(pkt)) {
        if (!PACKET_get_net_3(pkt, &cert_len)
                || !PACKET_get_bytes(pkt, &certbytes, cert_len)) {
            al = SSL_AD_DECODE_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_SERVER_CERTIFICATE,
                   SSL_R_CERT_LENGTH_MISMATCH);
            goto f_err;
        }

        certstart = certbytes;
        x = d2i_X509(NULL, (const unsigned char **)&certbytes, cert_len);
        if (x == NULL) {
            al = SSL_AD_BAD_CERTIFICATE;
            SSLerr(SSL_F_TLS_PROCESS_SERVER_CERTIFICATE, ERR_R_ASN1_LIB);
            goto f_err;
        }
        /* d2i_X509 must have consumed exactly cert_len bytes */
        if (certbytes != (certstart + cert_len)) {
            al = SSL_AD_DECODE_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_SERVER_CERTIFICATE,
                   SSL_R_CERT_LENGTH_MISMATCH);
            goto f_err;
        }
        if (!sk_X509_push(sk, x)) {
            SSLerr(SSL_F_TLS_PROCESS_SERVER_CERTIFICATE, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        x = NULL;               /* ownership transferred to |sk| */
    }

    i = ssl_verify_cert_chain(s, sk);
    /*
     * The documented interface is that SSL_VERIFY_PEER should be set in order
     * for client side verification of the server certificate to take place.
     * However, historically the code has only checked that *any* flag is set
     * to cause server verification to take place. Use of the other flags makes
     * no sense in client mode. An attempt to clean up the semantics was
     * reverted because at least one application *only* set
     * SSL_VERIFY_FAIL_IF_NO_PEER_CERT. Prior to the clean up this still caused
     * server verification to take place, after the clean up it silently did
     * nothing. SSL_CTX_set_verify()/SSL_set_verify() cannot validate the flags
     * sent to them because they are void functions. Therefore, we now use the
     * (less clean) historic behaviour of performing validation if any flag is
     * set. The *documented* interface remains the same.
     */
    if (s->verify_mode != SSL_VERIFY_NONE && i <= 0) {
        al = ssl_verify_alarm_type(s->verify_result);
        SSLerr(SSL_F_TLS_PROCESS_SERVER_CERTIFICATE,
               SSL_R_CERTIFICATE_VERIFY_FAILED);
        goto f_err;
    }
    ERR_clear_error();          /* but we keep s->verify_result */
    if (i > 1) {
        /* values > 1 are error codes returned by the verify machinery */
        SSLerr(SSL_F_TLS_PROCESS_SERVER_CERTIFICATE, i);
        al = SSL_AD_HANDSHAKE_FAILURE;
        goto f_err;
    }

    s->session->peer_chain = sk;
    /*
     * Inconsistency alert: cert_chain does include the peer's certificate,
     * which we don't include in statem_srvr.c
     */
    x = sk_X509_value(sk, 0);
    sk = NULL;
    /*
     * VRS 19990621: possible memory leak; sk=null ==> !sk_pop_free() @end
     */

    pkey = X509_get0_pubkey(x);

    if (pkey == NULL || EVP_PKEY_missing_parameters(pkey)) {
        /* |x| is owned by peer_chain now; NULL it so done: doesn't free it */
        x = NULL;
        al = SSL3_AL_FATAL;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_CERTIFICATE,
               SSL_R_UNABLE_TO_FIND_PUBLIC_KEY_PARAMETERS);
        goto f_err;
    }

    i = ssl_cert_type(x, pkey);
    if (i < 0) {
        x = NULL;
        al = SSL3_AL_FATAL;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_CERTIFICATE,
               SSL_R_UNKNOWN_CERTIFICATE_TYPE);
        goto f_err;
    }
    /*
     * Check the certificate type matches what the ciphersuite expects;
     * the GOST slots are interchangeable for a GOST-EC suite.
     */
    exp_idx = ssl_cipher_get_cert_index(s->s3->tmp.new_cipher);
    if (exp_idx >= 0 && i != exp_idx
            && (exp_idx != SSL_PKEY_GOST_EC ||
                (i != SSL_PKEY_GOST12_512 && i != SSL_PKEY_GOST12_256
                 && i != SSL_PKEY_GOST01))) {
        x = NULL;
        al = SSL_AD_ILLEGAL_PARAMETER;
        SSLerr(SSL_F_TLS_PROCESS_SERVER_CERTIFICATE,
               SSL_R_WRONG_CERTIFICATE_TYPE);
        goto f_err;
    }
    s->session->peer_type = i;

    /* Replace any previous peer certificate; take our own reference */
    X509_free(s->session->peer);
    X509_up_ref(x);
    s->session->peer = x;
    s->session->verify_result = s->verify_result;

    x = NULL;
    ret = MSG_PROCESS_CONTINUE_READING;
    goto done;

 f_err:
    ssl3_send_alert(s, SSL3_AL_FATAL, al);
 err:
    ossl_statem_set_error(s);
 done:
    X509_free(x);
    sk_X509_pop_free(sk, X509_free);
    return ret;
}
/*
 * Parse the PSK identity hint that prefixes a PSK-style ServerKeyExchange
 * and store it in the session for use when constructing the
 * ClientKeyExchange. Returns 1 on success, 0 on error with *al set to the
 * alert that should be sent.
 */
static int tls_process_ske_psk_preamble(SSL *s, PACKET *pkt, int *al)
{
#ifndef OPENSSL_NO_PSK
    PACKET hint;
    size_t hintlen;

    /* PSK ciphersuites are preceded by an identity hint */
    if (!PACKET_get_length_prefixed_2(pkt, &hint)) {
        *al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_PSK_PREAMBLE, SSL_R_LENGTH_MISMATCH);
        return 0;
    }
    hintlen = PACKET_remaining(&hint);

    /*
     * Assume that the maximum length of a PSK identity hint can be as
     * long as the maximum length of a PSK identity, and reject anything
     * larger.
     */
    if (hintlen > PSK_MAX_IDENTITY_LEN) {
        *al = SSL_AD_HANDSHAKE_FAILURE;
        SSLerr(SSL_F_TLS_PROCESS_SKE_PSK_PREAMBLE, SSL_R_DATA_LENGTH_TOO_LONG);
        return 0;
    }

    if (hintlen == 0) {
        /* An empty hint clears any hint stored previously */
        OPENSSL_free(s->session->psk_identity_hint);
        s->session->psk_identity_hint = NULL;
        return 1;
    }

    if (!PACKET_strndup(&hint, &s->session->psk_identity_hint)) {
        *al = SSL_AD_INTERNAL_ERROR;
        return 0;
    }

    return 1;
#else
    SSLerr(SSL_F_TLS_PROCESS_SKE_PSK_PREAMBLE, ERR_R_INTERNAL_ERROR);
    *al = SSL_AD_INTERNAL_ERROR;
    return 0;
#endif
}
/*
 * Parse the SRP parameters (N, g, salt, B) from a ServerKeyExchange message
 * into s->srp_ctx and verify them. On success, if the ciphersuite signs the
 * parameters (RSA/DSA auth) *pkey is set to the server certificate's public
 * key; otherwise it is left untouched.
 * Returns 1 on success, 0 on error with *al set to the alert to send.
 */
static int tls_process_ske_srp(SSL *s, PACKET *pkt, EVP_PKEY **pkey, int *al)
{
#ifndef OPENSSL_NO_SRP
    PACKET prime, generator, salt, server_pub;

    /* N, g and B carry 2-byte lengths; the salt a 1-byte length */
    if (!PACKET_get_length_prefixed_2(pkt, &prime)
            || !PACKET_get_length_prefixed_2(pkt, &generator)
            || !PACKET_get_length_prefixed_1(pkt, &salt)
            || !PACKET_get_length_prefixed_2(pkt, &server_pub)) {
        *al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_SRP, SSL_R_LENGTH_MISMATCH);
        return 0;
    }

    /* Convert all four values to BIGNUMs held in the SRP context */
    if ((s->srp_ctx.N =
         BN_bin2bn(PACKET_data(&prime),
                   PACKET_remaining(&prime), NULL)) == NULL
            || (s->srp_ctx.g =
                BN_bin2bn(PACKET_data(&generator),
                          PACKET_remaining(&generator), NULL)) == NULL
            || (s->srp_ctx.s =
                BN_bin2bn(PACKET_data(&salt),
                          PACKET_remaining(&salt), NULL)) == NULL
            || (s->srp_ctx.B =
                BN_bin2bn(PACKET_data(&server_pub),
                          PACKET_remaining(&server_pub), NULL)) == NULL) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_SRP, ERR_R_BN_LIB);
        return 0;
    }

    /* Reject bad group parameters / public values */
    if (!srp_verify_server_param(s, al)) {
        *al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_SRP, SSL_R_BAD_SRP_PARAMETERS);
        return 0;
    }

    /* We must check if there is a certificate */
    if (s->s3->tmp.new_cipher->algorithm_auth & (SSL_aRSA | SSL_aDSS))
        *pkey = X509_get0_pubkey(s->session->peer);

    return 1;
#else
    SSLerr(SSL_F_TLS_PROCESS_SKE_SRP, ERR_R_INTERNAL_ERROR);
    *al = SSL_AD_INTERNAL_ERROR;
    return 0;
#endif
}
/*
 * Parse an ephemeral DH ServerKeyExchange: read p, g and the server's
 * public value, build a DH key and stash it in s->s3->peer_tmp. On success,
 * *pkey is set to the server certificate key when the suite is signed
 * (RSA/DSA auth); left alone for anonymous DH.
 * Returns 1 on success, 0 on error with *al set.
 */
static int tls_process_ske_dhe(SSL *s, PACKET *pkt, EVP_PKEY **pkey, int *al)
{
#ifndef OPENSSL_NO_DH
    PACKET prime, generator, pub_key;
    EVP_PKEY *peer_tmp = NULL;
    DH *dh = NULL;
    BIGNUM *p = NULL, *g = NULL, *bnpub_key = NULL;

    /* Each of p, g and Ys is a 2-byte length-prefixed bignum */
    if (!PACKET_get_length_prefixed_2(pkt, &prime)
            || !PACKET_get_length_prefixed_2(pkt, &generator)
            || !PACKET_get_length_prefixed_2(pkt, &pub_key)) {
        *al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_DHE, SSL_R_LENGTH_MISMATCH);
        return 0;
    }

    peer_tmp = EVP_PKEY_new();
    dh = DH_new();

    if (peer_tmp == NULL || dh == NULL) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_DHE, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    p = BN_bin2bn(PACKET_data(&prime), PACKET_remaining(&prime), NULL);
    g = BN_bin2bn(PACKET_data(&generator), PACKET_remaining(&generator), NULL);
    bnpub_key = BN_bin2bn(PACKET_data(&pub_key), PACKET_remaining(&pub_key),
                          NULL);
    if (p == NULL || g == NULL || bnpub_key == NULL) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_DHE, ERR_R_BN_LIB);
        goto err;
    }

    /* Zero is never a valid prime, generator or public key */
    if (BN_is_zero(p) || BN_is_zero(g) || BN_is_zero(bnpub_key)) {
        *al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_DHE, SSL_R_BAD_DH_VALUE);
        goto err;
    }

    /* DH_set0_pqg takes ownership of p and g on success */
    if (!DH_set0_pqg(dh, p, NULL, g)) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_DHE, ERR_R_BN_LIB);
        goto err;
    }
    p = g = NULL;               /* owned by |dh| now */

    /* DH_set0_key takes ownership of bnpub_key on success */
    if (!DH_set0_key(dh, bnpub_key, NULL)) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_DHE, ERR_R_BN_LIB);
        goto err;
    }
    bnpub_key = NULL;           /* owned by |dh| now */

    /* Enforce the configured minimum strength for ephemeral DH */
    if (!ssl_security(s, SSL_SECOP_TMP_DH, DH_security_bits(dh), 0, dh)) {
        *al = SSL_AD_HANDSHAKE_FAILURE;
        SSLerr(SSL_F_TLS_PROCESS_SKE_DHE, SSL_R_DH_KEY_TOO_SMALL);
        goto err;
    }

    /* On success peer_tmp owns |dh| */
    if (EVP_PKEY_assign_DH(peer_tmp, dh) == 0) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_DHE, ERR_R_EVP_LIB);
        goto err;
    }

    s->s3->peer_tmp = peer_tmp;

    /*
     * FIXME: This makes assumptions about which ciphersuites come with
     * public keys. We should have a less ad-hoc way of doing this
     */
    if (s->s3->tmp.new_cipher->algorithm_auth & (SSL_aRSA | SSL_aDSS))
        *pkey = X509_get0_pubkey(s->session->peer);
    /* else anonymous DH, so no certificate or pkey. */

    return 1;

 err:
    BN_free(p);
    BN_free(g);
    BN_free(bnpub_key);
    DH_free(dh);
    EVP_PKEY_free(peer_tmp);
    return 0;
#else
    SSLerr(SSL_F_TLS_PROCESS_SKE_DHE, ERR_R_INTERNAL_ERROR);
    *al = SSL_AD_INTERNAL_ERROR;
    return 0;
#endif
}
/*
 * Parse an ECDHE ServerKeyExchange: a 3-byte named-curve ECParameters
 * structure followed by a 1-byte length-prefixed EC point. The server's
 * ephemeral key ends up in s->s3->peer_tmp. On success, *pkey is set to
 * the server certificate key for signed suites (ECDSA or RSA auth); left
 * alone for anonymous ECDH.
 * Returns 1 on success, 0 on error with *al set.
 */
static int tls_process_ske_ecdhe(SSL *s, PACKET *pkt, EVP_PKEY **pkey, int *al)
{
#ifndef OPENSSL_NO_EC
    PACKET encoded_pt;
    const unsigned char *ecparams;
    int curve_nid;
    unsigned int curve_flags;
    EVP_PKEY_CTX *pctx = NULL;

    /*
     * Extract elliptic curve parameters and the server's ephemeral ECDH
     * public key. For now we only support named (not generic) curves and
     * ECParameters in this case is just three bytes.
     */
    if (!PACKET_get_bytes(pkt, &ecparams, 3)) {
        *al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_ECDHE, SSL_R_LENGTH_TOO_SHORT);
        return 0;
    }
    /*
     * Check curve is one of our preferences, if not server has sent an
     * invalid curve. ECParameters is 3 bytes.
     */
    if (!tls1_check_curve(s, ecparams, 3)) {
        *al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_ECDHE, SSL_R_WRONG_CURVE);
        return 0;
    }

    /* Third byte of ECParameters is the TLS curve id */
    curve_nid = tls1_ec_curve_id2nid(*(ecparams + 2), &curve_flags);

    if (curve_nid == 0) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_ECDHE,
               SSL_R_UNABLE_TO_FIND_ECDH_PARAMETERS);
        return 0;
    }

    if ((curve_flags & TLS_CURVE_TYPE) == TLS_CURVE_CUSTOM) {
        /* Custom curves (e.g. X25519) carry the NID as the key type */
        EVP_PKEY *key = EVP_PKEY_new();

        if (key == NULL || !EVP_PKEY_set_type(key, curve_nid)) {
            *al = SSL_AD_INTERNAL_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_SKE_ECDHE, ERR_R_EVP_LIB);
            EVP_PKEY_free(key);
            return 0;
        }
        s->s3->peer_tmp = key;
    } else {
        /* Set up EVP_PKEY with named curve as parameters */
        pctx = EVP_PKEY_CTX_new_id(EVP_PKEY_EC, NULL);
        if (pctx == NULL
                || EVP_PKEY_paramgen_init(pctx) <= 0
                || EVP_PKEY_CTX_set_ec_paramgen_curve_nid(pctx, curve_nid) <= 0
                || EVP_PKEY_paramgen(pctx, &s->s3->peer_tmp) <= 0) {
            *al = SSL_AD_INTERNAL_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_SKE_ECDHE, ERR_R_EVP_LIB);
            EVP_PKEY_CTX_free(pctx);
            return 0;
        }
        EVP_PKEY_CTX_free(pctx);
        pctx = NULL;
    }

    if (!PACKET_get_length_prefixed_1(pkt, &encoded_pt)) {
        *al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_ECDHE, SSL_R_LENGTH_MISMATCH);
        return 0;
    }

    /* Install the server's public point into the stashed key */
    if (!EVP_PKEY_set1_tls_encodedpoint(s->s3->peer_tmp,
                                        PACKET_data(&encoded_pt),
                                        PACKET_remaining(&encoded_pt))) {
        *al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_SKE_ECDHE, SSL_R_BAD_ECPOINT);
        return 0;
    }

    /*
     * The ECC/TLS specification does not mention the use of DSA to sign
     * ECParameters in the server key exchange message. We do support RSA
     * and ECDSA.
     */
    if (s->s3->tmp.new_cipher->algorithm_auth & SSL_aECDSA)
        *pkey = X509_get0_pubkey(s->session->peer);
    else if (s->s3->tmp.new_cipher->algorithm_auth & SSL_aRSA)
        *pkey = X509_get0_pubkey(s->session->peer);
    /* else anonymous ECDH, so no certificate or pkey. */

    return 1;
#else
    SSLerr(SSL_F_TLS_PROCESS_SKE_ECDHE, ERR_R_INTERNAL_ERROR);
    *al = SSL_AD_INTERNAL_ERROR;
    return 0;
#endif
}
/*
 * Process a ServerKeyExchange message: dispatch to the per-key-exchange
 * parser (PSK preamble, SRP, DHE, ECDHE) and then, when the ciphersuite is
 * signed, verify the server's signature over client_random ||
 * server_random || params.
 *
 * Returns MSG_PROCESS_CONTINUE_READING on success, MSG_PROCESS_ERROR on
 * failure (sending a fatal alert first when an alert code was chosen).
 */
MSG_PROCESS_RETURN tls_process_key_exchange(SSL *s, PACKET *pkt)
{
    int al = -1;                /* -1 = no alert to send on the err path */
    long alg_k;
    EVP_PKEY *pkey = NULL;      /* non-NULL iff the params are signed */
    PACKET save_param_start, signature;

    alg_k = s->s3->tmp.new_cipher->algorithm_mkey;

    /* Remember where the params start so we can hash them for the sig */
    save_param_start = *pkt;

#if !defined(OPENSSL_NO_EC) || !defined(OPENSSL_NO_DH)
    /* Discard any ephemeral key left over from a previous handshake */
    EVP_PKEY_free(s->s3->peer_tmp);
    s->s3->peer_tmp = NULL;
#endif

    if (alg_k & SSL_PSK) {
        if (!tls_process_ske_psk_preamble(s, pkt, &al))
            goto err;
    }

    /* Nothing else to do for plain PSK or RSAPSK */
    if (alg_k & (SSL_kPSK | SSL_kRSAPSK)) {
    } else if (alg_k & SSL_kSRP) {
        if (!tls_process_ske_srp(s, pkt, &pkey, &al))
            goto err;
    } else if (alg_k & (SSL_kDHE | SSL_kDHEPSK)) {
        if (!tls_process_ske_dhe(s, pkt, &pkey, &al))
            goto err;
    } else if (alg_k & (SSL_kECDHE | SSL_kECDHEPSK)) {
        if (!tls_process_ske_ecdhe(s, pkt, &pkey, &al))
            goto err;
    } else if (alg_k) {
        /* No other key exchange should send a ServerKeyExchange */
        al = SSL_AD_UNEXPECTED_MESSAGE;
        SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, SSL_R_UNEXPECTED_MESSAGE);
        goto err;
    }

    /* if it was signed, check the signature */
    if (pkey != NULL) {
        PACKET params;
        int maxsig;
        const EVP_MD *md = NULL;
        EVP_MD_CTX *md_ctx;

        /*
         * |pkt| now points to the beginning of the signature, so the difference
         * equals the length of the parameters.
         */
        if (!PACKET_get_sub_packet(&save_param_start, &params,
                                   PACKET_remaining(&save_param_start) -
                                   PACKET_remaining(pkt))) {
            al = SSL_AD_INTERNAL_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
            goto err;
        }

        if (SSL_USE_SIGALGS(s)) {
            /* TLS >= 1.2: a 2-byte SignatureAndHashAlgorithm precedes sig */
            const unsigned char *sigalgs;
            int rv;
            if (!PACKET_get_bytes(pkt, &sigalgs, 2)) {
                al = SSL_AD_DECODE_ERROR;
                SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, SSL_R_LENGTH_TOO_SHORT);
                goto err;
            }
            rv = tls12_check_peer_sigalg(&md, s, sigalgs, pkey);
            if (rv == -1) {
                al = SSL_AD_INTERNAL_ERROR;
                goto err;
            } else if (rv == 0) {
                al = SSL_AD_DECODE_ERROR;
                goto err;
            }
#ifdef SSL_DEBUG
            fprintf(stderr, "USING TLSv1.2 HASH %s\n", EVP_MD_name(md));
#endif
        } else if (EVP_PKEY_id(pkey) == EVP_PKEY_RSA) {
            /* Pre-1.2 RSA signatures use the combined MD5+SHA1 digest */
            md = EVP_md5_sha1();
        } else {
            md = EVP_sha1();
        }

        /* The signature must be the last thing in the message */
        if (!PACKET_get_length_prefixed_2(pkt, &signature)
                || PACKET_remaining(pkt) != 0) {
            al = SSL_AD_DECODE_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, SSL_R_LENGTH_MISMATCH);
            goto err;
        }
        maxsig = EVP_PKEY_size(pkey);
        if (maxsig < 0) {
            al = SSL_AD_INTERNAL_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
            goto err;
        }

        /*
         * Check signature length
         */
        if (PACKET_remaining(&signature) > (size_t)maxsig) {
            /* wrong packet length */
            al = SSL_AD_DECODE_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE,
                   SSL_R_WRONG_SIGNATURE_LENGTH);
            goto err;
        }

        md_ctx = EVP_MD_CTX_new();
        if (md_ctx == NULL) {
            al = SSL_AD_INTERNAL_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, ERR_R_MALLOC_FAILURE);
            goto err;
        }

        /* Verify over client_random || server_random || params */
        if (EVP_VerifyInit_ex(md_ctx, md, NULL) <= 0
                || EVP_VerifyUpdate(md_ctx, &(s->s3->client_random[0]),
                                    SSL3_RANDOM_SIZE) <= 0
                || EVP_VerifyUpdate(md_ctx, &(s->s3->server_random[0]),
                                    SSL3_RANDOM_SIZE) <= 0
                || EVP_VerifyUpdate(md_ctx, PACKET_data(&params),
                                    PACKET_remaining(&params)) <= 0) {
            EVP_MD_CTX_free(md_ctx);
            al = SSL_AD_INTERNAL_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, ERR_R_EVP_LIB);
            goto err;
        }
        if (EVP_VerifyFinal(md_ctx, PACKET_data(&signature),
                            PACKET_remaining(&signature), pkey) <= 0) {
            /* bad signature */
            EVP_MD_CTX_free(md_ctx);
            al = SSL_AD_DECRYPT_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, SSL_R_BAD_SIGNATURE);
            goto err;
        }
        EVP_MD_CTX_free(md_ctx);
    } else {
        /* aNULL, aSRP or PSK do not need public keys */
        if (!(s->s3->tmp.new_cipher->algorithm_auth & (SSL_aNULL | SSL_aSRP))
                && !(alg_k & SSL_PSK)) {
            /* Might be wrong key type, check it */
            if (ssl3_check_cert_and_algorithm(s)) {
                /* Otherwise this shouldn't happen */
                al = SSL_AD_INTERNAL_ERROR;
                SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
            } else {
                al = SSL_AD_DECODE_ERROR;
            }
            goto err;
        }

        /* still data left over */
        if (PACKET_remaining(pkt) != 0) {
            al = SSL_AD_DECODE_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_KEY_EXCHANGE, SSL_R_EXTRA_DATA_IN_MESSAGE);
            goto err;
        }
    }

    return MSG_PROCESS_CONTINUE_READING;
 err:
    if (al != -1)
        ssl3_send_alert(s, SSL3_AL_FATAL, al);
    ossl_statem_set_error(s);
    return MSG_PROCESS_ERROR;
}
/*
 * Process a CertificateRequest message: record the acceptable certificate
 * types, (for TLS >= 1.2) the server's signature algorithm list, and the
 * list of acceptable CA distinguished names, then flag that a client
 * certificate is expected.
 *
 * Returns MSG_PROCESS_CONTINUE_PROCESSING on success, MSG_PROCESS_ERROR on
 * failure (a fatal alert is sent before jumping to err where applicable).
 */
MSG_PROCESS_RETURN tls_process_certificate_request(SSL *s, PACKET *pkt)
{
    int ret = MSG_PROCESS_ERROR;
    unsigned int list_len, ctype_num, i, name_len;
    X509_NAME *xn = NULL;       /* CA name currently being parsed */
    const unsigned char *data;
    const unsigned char *namestart, *namebytes;
    STACK_OF(X509_NAME) *ca_sk = NULL;

    if ((ca_sk = sk_X509_NAME_new(ca_dn_cmp)) == NULL) {
        SSLerr(SSL_F_TLS_PROCESS_CERTIFICATE_REQUEST, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    /* get the certificate types */
    if (!PACKET_get_1(pkt, &ctype_num)
            || !PACKET_get_bytes(pkt, &data, ctype_num)) {
        ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
        SSLerr(SSL_F_TLS_PROCESS_CERTIFICATE_REQUEST, SSL_R_LENGTH_MISMATCH);
        goto err;
    }
    OPENSSL_free(s->cert->ctypes);
    s->cert->ctypes = NULL;
    if (ctype_num > SSL3_CT_NUMBER) {
        /* If we exceed static buffer copy all to cert structure */
        s->cert->ctypes = OPENSSL_malloc(ctype_num);
        if (s->cert->ctypes == NULL) {
            SSLerr(SSL_F_TLS_PROCESS_CERTIFICATE_REQUEST, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        memcpy(s->cert->ctypes, data, ctype_num);
        s->cert->ctype_num = (size_t)ctype_num;
        ctype_num = SSL3_CT_NUMBER;     /* static buffer holds the rest */
    }
    for (i = 0; i < ctype_num; i++)
        s->s3->tmp.ctype[i] = data[i];

    if (SSL_USE_SIGALGS(s)) {
        /* TLS >= 1.2: a supported_signature_algorithms list follows */
        if (!PACKET_get_net_2(pkt, &list_len)
                || !PACKET_get_bytes(pkt, &data, list_len)) {
            ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
            SSLerr(SSL_F_TLS_PROCESS_CERTIFICATE_REQUEST,
                   SSL_R_LENGTH_MISMATCH);
            goto err;
        }

        /* Clear certificate digests and validity flags */
        for (i = 0; i < SSL_PKEY_NUM; i++) {
            s->s3->tmp.md[i] = NULL;
            s->s3->tmp.valid_flags[i] = 0;
        }
        /* Entries come in 2-byte pairs, so an odd length is malformed */
        if ((list_len & 1) || !tls1_save_sigalgs(s, data, list_len)) {
            ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
            SSLerr(SSL_F_TLS_PROCESS_CERTIFICATE_REQUEST,
                   SSL_R_SIGNATURE_ALGORITHMS_ERROR);
            goto err;
        }
        if (!tls1_process_sigalgs(s)) {
            ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
            SSLerr(SSL_F_TLS_PROCESS_CERTIFICATE_REQUEST, ERR_R_MALLOC_FAILURE);
            goto err;
        }
    } else {
        ssl_set_default_md(s);
    }

    /* get the CA RDNs */
    if (!PACKET_get_net_2(pkt, &list_len)
            || PACKET_remaining(pkt) != list_len) {
        ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
        SSLerr(SSL_F_TLS_PROCESS_CERTIFICATE_REQUEST, SSL_R_LENGTH_MISMATCH);
        goto err;
    }

    while (PACKET_remaining(pkt)) {
        if (!PACKET_get_net_2(pkt, &name_len)
                || !PACKET_get_bytes(pkt, &namebytes, name_len)) {
            ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
            SSLerr(SSL_F_TLS_PROCESS_CERTIFICATE_REQUEST,
                   SSL_R_LENGTH_MISMATCH);
            goto err;
        }

        namestart = namebytes;

        if ((xn = d2i_X509_NAME(NULL, (const unsigned char **)&namebytes,
                                name_len)) == NULL) {
            ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
            SSLerr(SSL_F_TLS_PROCESS_CERTIFICATE_REQUEST, ERR_R_ASN1_LIB);
            goto err;
        }

        /* d2i_X509_NAME must have consumed exactly name_len bytes */
        if (namebytes != (namestart + name_len)) {
            ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
            SSLerr(SSL_F_TLS_PROCESS_CERTIFICATE_REQUEST,
                   SSL_R_CA_DN_LENGTH_MISMATCH);
            goto err;
        }
        if (!sk_X509_NAME_push(ca_sk, xn)) {
            SSLerr(SSL_F_TLS_PROCESS_CERTIFICATE_REQUEST, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        xn = NULL;              /* ownership transferred to |ca_sk| */
    }

    /* we should setup a certificate to return.... */
    s->s3->tmp.cert_req = 1;
    s->s3->tmp.ctype_num = ctype_num;
    sk_X509_NAME_pop_free(s->s3->tmp.ca_names, X509_NAME_free);
    s->s3->tmp.ca_names = ca_sk;
    ca_sk = NULL;

    ret = MSG_PROCESS_CONTINUE_PROCESSING;
    goto done;

 err:
    ossl_statem_set_error(s);
 done:
    X509_NAME_free(xn);
    sk_X509_NAME_pop_free(ca_sk, X509_NAME_free);
    return ret;
}
/* Comparator used to order CA distinguished names in the CA-name stack. */
static int ca_dn_cmp(const X509_NAME *const *a, const X509_NAME *const *b)
{
    return X509_NAME_cmp(*a, *b);
}
/*
 * Process a NewSessionTicket message: store the ticket and its lifetime
 * hint in the session (duplicating the session first if it was resumed)
 * and derive a synthetic session ID from the ticket so normal session ID
 * matching works on resumption.
 *
 * Returns MSG_PROCESS_CONTINUE_READING on success, MSG_PROCESS_ERROR on
 * failure.
 */
MSG_PROCESS_RETURN tls_process_new_session_ticket(SSL *s, PACKET *pkt)
{
    int al;
    unsigned int ticklen;
    unsigned long ticket_lifetime_hint;

    /* 4-byte lifetime hint, 2-byte length, then exactly that many bytes */
    if (!PACKET_get_net_4(pkt, &ticket_lifetime_hint)
            || !PACKET_get_net_2(pkt, &ticklen)
            || PACKET_remaining(pkt) != ticklen) {
        al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_NEW_SESSION_TICKET, SSL_R_LENGTH_MISMATCH);
        goto f_err;
    }

    /* Server is allowed to change its mind and send an empty ticket. */
    if (ticklen == 0)
        return MSG_PROCESS_CONTINUE_READING;

    if (s->session->session_id_length > 0) {
        int i = s->session_ctx->session_cache_mode;
        SSL_SESSION *new_sess;
        /*
         * We reused an existing session, so we need to replace it with a new
         * one
         */
        if (i & SSL_SESS_CACHE_CLIENT) {
            /*
             * Remove the old session from the cache. We carry on if this fails
             */
            SSL_CTX_remove_session(s->session_ctx, s->session);
        }

        if ((new_sess = ssl_session_dup(s->session, 0)) == 0) {
            al = SSL_AD_INTERNAL_ERROR;
            SSLerr(SSL_F_TLS_PROCESS_NEW_SESSION_TICKET, ERR_R_MALLOC_FAILURE);
            goto f_err;
        }

        SSL_SESSION_free(s->session);
        s->session = new_sess;
    }

    /* Replace any ticket already stored in the session */
    OPENSSL_free(s->session->tlsext_tick);
    s->session->tlsext_ticklen = 0;

    s->session->tlsext_tick = OPENSSL_malloc(ticklen);
    if (s->session->tlsext_tick == NULL) {
        SSLerr(SSL_F_TLS_PROCESS_NEW_SESSION_TICKET, ERR_R_MALLOC_FAILURE);
        goto err;
    }
    if (!PACKET_copy_bytes(pkt, s->session->tlsext_tick, ticklen)) {
        al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_NEW_SESSION_TICKET, SSL_R_LENGTH_MISMATCH);
        goto f_err;
    }

    s->session->tlsext_tick_lifetime_hint = ticket_lifetime_hint;
    s->session->tlsext_ticklen = ticklen;
    /*
     * There are two ways to detect a resumed ticket session. One is to set
     * an appropriate session ID and then the server must return a match in
     * ServerHello. This allows the normal client session ID matching to work
     * and we know much earlier that the ticket has been accepted. The
     * other way is to set zero length session ID when the ticket is
     * presented and rely on the handshake to determine session resumption.
     * We choose the former approach because this fits in with assumptions
     * elsewhere in OpenSSL. The session ID is set to the SHA256 (or SHA1 is
     * SHA256 is disabled) hash of the ticket.
     */
    if (!EVP_Digest(s->session->tlsext_tick, ticklen,
                    s->session->session_id, &s->session->session_id_length,
                    EVP_sha256(), NULL)) {
        SSLerr(SSL_F_TLS_PROCESS_NEW_SESSION_TICKET, ERR_R_EVP_LIB);
        goto err;
    }
    return MSG_PROCESS_CONTINUE_READING;
 f_err:
    ssl3_send_alert(s, SSL3_AL_FATAL, al);
 err:
    ossl_statem_set_error(s);
    return MSG_PROCESS_ERROR;
}
/*
 * Process a CertificateStatus message carrying an OCSP response for the
 * server certificate (RFC 6066 status_request). The raw response is stored
 * in s->tlsext_ocsp_resp/resplen for the status callback to inspect later.
 *
 * Returns MSG_PROCESS_CONTINUE_READING on success, MSG_PROCESS_ERROR on
 * failure (after sending a fatal alert).
 */
MSG_PROCESS_RETURN tls_process_cert_status(SSL *s, PACKET *pkt)
{
    int al;
    unsigned long resplen;
    unsigned int type;

    /* Only the "ocsp" status type is supported */
    if (!PACKET_get_1(pkt, &type)
            || type != TLSEXT_STATUSTYPE_ocsp) {
        al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_CERT_STATUS, SSL_R_UNSUPPORTED_STATUS_TYPE);
        goto f_err;
    }
    /* The 3-byte length must account for exactly the rest of the message */
    if (!PACKET_get_net_3(pkt, &resplen)
            || PACKET_remaining(pkt) != resplen) {
        al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_CERT_STATUS, SSL_R_LENGTH_MISMATCH);
        goto f_err;
    }
    /*
     * Free any OCSP response left over from an earlier handshake on this
     * SSL (e.g. renegotiation); previously the pointer was overwritten
     * unconditionally, leaking the old buffer.
     */
    OPENSSL_free(s->tlsext_ocsp_resp);
    s->tlsext_ocsp_resp = OPENSSL_malloc(resplen);
    if (s->tlsext_ocsp_resp == NULL) {
        al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_CERT_STATUS, ERR_R_MALLOC_FAILURE);
        goto f_err;
    }
    if (!PACKET_copy_bytes(pkt, s->tlsext_ocsp_resp, resplen)) {
        al = SSL_AD_DECODE_ERROR;
        SSLerr(SSL_F_TLS_PROCESS_CERT_STATUS, SSL_R_LENGTH_MISMATCH);
        goto f_err;
    }
    s->tlsext_ocsp_resplen = resplen;
    return MSG_PROCESS_CONTINUE_READING;
 f_err:
    ssl3_send_alert(s, SSL3_AL_FATAL, al);
    ossl_statem_set_error(s);
    return MSG_PROCESS_ERROR;
}
/*
 * Process a ServerHelloDone message (must be empty). Performs the
 * end-of-server-flight checks: SRP A computation, certificate/algorithm
 * consistency, the OCSP status callback, and (if configured) SCT
 * validation.
 *
 * Returns MSG_PROCESS_FINISHED_READING on success (or
 * MSG_PROCESS_CONTINUE_PROCESSING for a DTLS/SCTP renegotiation),
 * MSG_PROCESS_ERROR on failure.
 */
MSG_PROCESS_RETURN tls_process_server_done(SSL *s, PACKET *pkt)
{
    if (PACKET_remaining(pkt) > 0) {
        /* should contain no data */
        ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_DECODE_ERROR);
        SSLerr(SSL_F_TLS_PROCESS_SERVER_DONE, SSL_R_LENGTH_MISMATCH);
        ossl_statem_set_error(s);
        return MSG_PROCESS_ERROR;
    }
#ifndef OPENSSL_NO_SRP
    if (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kSRP) {
        /* Compute the client SRP public value A before sending CKE */
        if (SRP_Calc_A_param(s) <= 0) {
            SSLerr(SSL_F_TLS_PROCESS_SERVER_DONE, SSL_R_SRP_A_CALC);
            ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
            ossl_statem_set_error(s);
            return MSG_PROCESS_ERROR;
        }
    }
#endif

    /*
     * at this point we check that we have the required stuff from
     * the server
     */
    if (!ssl3_check_cert_and_algorithm(s)) {
        ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
        ossl_statem_set_error(s);
        return MSG_PROCESS_ERROR;
    }

    /*
     * Call the ocsp status callback if needed. The |tlsext_ocsp_resp| and
     * |tlsext_ocsp_resplen| values will be set if we actually received a status
     * message, or NULL and -1 otherwise
     */
    if (s->tlsext_status_type != -1 && s->ctx->tlsext_status_cb != NULL) {
        int ret;
        ret = s->ctx->tlsext_status_cb(s, s->ctx->tlsext_status_arg);
        if (ret == 0) {
            /* Callback rejected the OCSP response */
            ssl3_send_alert(s, SSL3_AL_FATAL,
                            SSL_AD_BAD_CERTIFICATE_STATUS_RESPONSE);
            SSLerr(SSL_F_TLS_PROCESS_SERVER_DONE,
                   SSL_R_INVALID_STATUS_RESPONSE);
            return MSG_PROCESS_ERROR;
        }
        if (ret < 0) {
            /* Callback reported an internal error */
            ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
            SSLerr(SSL_F_TLS_PROCESS_SERVER_DONE, ERR_R_MALLOC_FAILURE);
            return MSG_PROCESS_ERROR;
        }
    }
#ifndef OPENSSL_NO_CT
    if (s->ct_validation_callback != NULL) {
        /* Note we validate the SCTs whether or not we abort on error */
        if (!ssl_validate_ct(s) && (s->verify_mode & SSL_VERIFY_PEER)) {
            ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
            return MSG_PROCESS_ERROR;
        }
    }
#endif

#ifndef OPENSSL_NO_SCTP
    /* Only applies to renegotiation */
    if (SSL_IS_DTLS(s) && BIO_dgram_is_sctp(SSL_get_wbio(s))
            && s->renegotiate != 0)
        return MSG_PROCESS_CONTINUE_PROCESSING;
    else
#endif
        return MSG_PROCESS_FINISHED_READING;
}
/*
 * Build the PSK identity part of a ClientKeyExchange message. Calls the
 * application's psk_client_callback to obtain the identity and the PSK,
 * stores the PSK in s->s3->tmp.psk and the identity in the session, and
 * writes the 2-byte length-prefixed identity at *p (advancing *p and
 * setting *pskhdrlen to the number of bytes written).
 *
 * Returns 1 on success, 0 on error with *al set to the alert to send.
 */
static int tls_construct_cke_psk_preamble(SSL *s, unsigned char **p,
                                          size_t *pskhdrlen, int *al)
{
#ifndef OPENSSL_NO_PSK
    int ret = 0;
    /*
     * The callback needs PSK_MAX_IDENTITY_LEN + 1 bytes to return a
     * \0-terminated identity. The last byte is for us for simulating
     * strnlen.
     */
    char identity[PSK_MAX_IDENTITY_LEN + 1];
    size_t identitylen = 0;
    unsigned char psk[PSK_MAX_PSK_LEN];
    unsigned char *tmppsk = NULL;
    char *tmpidentity = NULL;
    size_t psklen = 0;

    if (s->psk_client_callback == NULL) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_PSK_PREAMBLE, SSL_R_PSK_NO_CLIENT_CB);
        *al = SSL_AD_INTERNAL_ERROR;
        goto err;
    }

    memset(identity, 0, sizeof(identity));

    psklen = s->psk_client_callback(s, s->session->psk_identity_hint,
                                    identity, sizeof(identity) - 1,
                                    psk, sizeof(psk));

    if (psklen > PSK_MAX_PSK_LEN) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_PSK_PREAMBLE, ERR_R_INTERNAL_ERROR);
        *al = SSL_AD_HANDSHAKE_FAILURE;
        /*
         * The callback returned an invalid length, so |psklen| does not
         * describe the contents of |psk|. Clamp it to the buffer size so
         * that the OPENSSL_cleanse() in the error path below does not
         * read/write past the end of the 64-byte stack buffer.
         */
        psklen = PSK_MAX_PSK_LEN;
        goto err;
    } else if (psklen == 0) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_PSK_PREAMBLE,
               SSL_R_PSK_IDENTITY_NOT_FOUND);
        *al = SSL_AD_HANDSHAKE_FAILURE;
        goto err;
    }

    /* The callback NUL-terminates within sizeof(identity) - 1 bytes */
    identitylen = strlen(identity);
    if (identitylen > PSK_MAX_IDENTITY_LEN) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_PSK_PREAMBLE, ERR_R_INTERNAL_ERROR);
        *al = SSL_AD_HANDSHAKE_FAILURE;
        goto err;
    }

    tmppsk = OPENSSL_memdup(psk, psklen);
    tmpidentity = OPENSSL_strdup(identity);
    if (tmppsk == NULL || tmpidentity == NULL) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_PSK_PREAMBLE, ERR_R_MALLOC_FAILURE);
        *al = SSL_AD_INTERNAL_ERROR;
        goto err;
    }

    /* Stash the PSK for the master-secret derivation */
    OPENSSL_free(s->s3->tmp.psk);
    s->s3->tmp.psk = tmppsk;
    s->s3->tmp.psklen = psklen;
    tmppsk = NULL;
    OPENSSL_free(s->session->psk_identity);
    s->session->psk_identity = tmpidentity;
    tmpidentity = NULL;

    /* Write the identity as a 2-byte length-prefixed string */
    s2n(identitylen, *p);
    memcpy(*p, identity, identitylen);
    *pskhdrlen = 2 + identitylen;
    *p += identitylen;

    ret = 1;

 err:
    /* Scrub all key material from the stack and any orphaned copies */
    OPENSSL_cleanse(psk, psklen);
    OPENSSL_cleanse(identity, sizeof(identity));
    OPENSSL_clear_free(tmppsk, psklen);
    OPENSSL_clear_free(tmpidentity, identitylen);

    return ret;
#else
    SSLerr(SSL_F_TLS_CONSTRUCT_CKE_PSK_PREAMBLE, ERR_R_INTERNAL_ERROR);
    *al = SSL_AD_INTERNAL_ERROR;
    return 0;
#endif
}
/*
 * Build the RSA key exchange part of a ClientKeyExchange: generate the
 * 48-byte premaster secret (client_version || random), encrypt it with
 * the server's RSA public key, and write it at *p (length-prefixed for
 * TLS, raw for SSLv3). The plaintext premaster is stashed in
 * s->s3->tmp.pms for later master-secret derivation.
 *
 * Returns 1 on success, 0 on error (*al set where an alert is chosen).
 */
static int tls_construct_cke_rsa(SSL *s, unsigned char **p, int *len, int *al)
{
#ifndef OPENSSL_NO_RSA
    unsigned char *q;
    EVP_PKEY *pkey = NULL;
    EVP_PKEY_CTX *pctx = NULL;
    size_t enclen;
    unsigned char *pms = NULL;
    size_t pmslen = 0;

    if (s->session->peer == NULL) {
        /*
         * We should always have a server certificate with SSL_kRSA.
         */
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_RSA, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    pkey = X509_get0_pubkey(s->session->peer);
    if (EVP_PKEY_get0_RSA(pkey) == NULL) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_RSA, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    pmslen = SSL_MAX_MASTER_KEY_LENGTH;
    pms = OPENSSL_malloc(pmslen);
    if (pms == NULL) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_RSA, ERR_R_MALLOC_FAILURE);
        *al = SSL_AD_INTERNAL_ERROR;
        return 0;
    }

    /* Premaster starts with the client's offered protocol version */
    pms[0] = s->client_version >> 8;
    pms[1] = s->client_version & 0xff;
    if (RAND_bytes(pms + 2, pmslen - 2) <= 0) {
        goto err;
    }

    /* Remember where the 2-byte length goes; SSLv3 has no length prefix */
    q = *p;
    /* Fix buf for TLS and beyond */
    if (s->version > SSL3_VERSION)
        *p += 2;
    pctx = EVP_PKEY_CTX_new(pkey, NULL);
    /* First call sizes the output, second call encrypts into *p */
    if (pctx == NULL || EVP_PKEY_encrypt_init(pctx) <= 0
            || EVP_PKEY_encrypt(pctx, NULL, &enclen, pms, pmslen) <= 0) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_RSA, ERR_R_EVP_LIB);
        goto err;
    }
    if (EVP_PKEY_encrypt(pctx, *p, &enclen, pms, pmslen) <= 0) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_RSA, SSL_R_BAD_RSA_ENCRYPT);
        goto err;
    }
    *len = enclen;
    EVP_PKEY_CTX_free(pctx);
    pctx = NULL;
# ifdef PKCS1_CHECK
    /*
     * NOTE(review): legacy test hooks; |tmp_buf| is not defined in this
     * function, so this block does not compile if PKCS1_CHECK is defined.
     */
    if (s->options & SSL_OP_PKCS1_CHECK_1)
        (*p)[1]++;
    if (s->options & SSL_OP_PKCS1_CHECK_2)
        tmp_buf[0] = 0x70;
# endif

    /* Fix buf for TLS and beyond */
    if (s->version > SSL3_VERSION) {
        s2n(*len, q);
        *len += 2;
    }

    s->s3->tmp.pms = pms;
    s->s3->tmp.pmslen = pmslen;

    return 1;
 err:
    OPENSSL_clear_free(pms, pmslen);
    EVP_PKEY_CTX_free(pctx);
    return 0;
#else
    SSLerr(SSL_F_TLS_CONSTRUCT_CKE_RSA, ERR_R_INTERNAL_ERROR);
    *al = SSL_AD_INTERNAL_ERROR;
    return 0;
#endif
}
/*
 * Build the DHE part of a ClientKeyExchange: generate an ephemeral DH key
 * on the server's parameters (stashed in s->s3->peer_tmp by the SKE
 * handler), derive the premaster secret, and write our public value at *p
 * as a 2-byte length-prefixed bignum.
 *
 * Returns 1 on success, 0 on error.
 */
static int tls_construct_cke_dhe(SSL *s, unsigned char **p, int *len, int *al)
{
#ifndef OPENSSL_NO_DH
    DH *dh_clnt = NULL;
    const BIGNUM *pub_key;
    EVP_PKEY *ckey = NULL, *skey = NULL;

    skey = s->s3->peer_tmp;
    if (skey == NULL) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_DHE, ERR_R_INTERNAL_ERROR);
        return 0;
    }
    ckey = ssl_generate_pkey(skey);
    if (ckey == NULL) {
        /*
         * Check before EVP_PKEY_get0_DH(): passing a NULL EVP_PKEY there
         * would dereference a NULL pointer.
         */
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_DHE, ERR_R_INTERNAL_ERROR);
        return 0;
    }
    dh_clnt = EVP_PKEY_get0_DH(ckey);

    /* ssl_derive() computes and stashes the premaster secret */
    if (dh_clnt == NULL || ssl_derive(s, ckey, skey) == 0) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_DHE, ERR_R_INTERNAL_ERROR);
        EVP_PKEY_free(ckey);
        return 0;
    }

    /* send off the data: our public value as a 2-byte prefixed bignum */
    DH_get0_key(dh_clnt, &pub_key, NULL);
    *len = BN_num_bytes(pub_key);
    s2n(*len, *p);
    BN_bn2bin(pub_key, *p);
    *len += 2;
    EVP_PKEY_free(ckey);

    return 1;
#else
    SSLerr(SSL_F_TLS_CONSTRUCT_CKE_DHE, ERR_R_INTERNAL_ERROR);
    *al = SSL_AD_INTERNAL_ERROR;
    return 0;
#endif
}
/*
 * Build the ECDHE part of a ClientKeyExchange: generate an ephemeral key
 * matching the server's (stashed in s->s3->peer_tmp), derive the premaster
 * secret, and write our encoded public point at *p as a 1-byte
 * length-prefixed opaque.
 *
 * Returns 1 on success, 0 on error.
 */
static int tls_construct_cke_ecdhe(SSL *s, unsigned char **p, int *len, int *al)
{
#ifndef OPENSSL_NO_EC
    unsigned char *encodedPoint = NULL;
    int encoded_pt_len = 0;
    EVP_PKEY *ckey = NULL, *skey = NULL;

    skey = s->s3->peer_tmp;
    if (skey == NULL) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_ECDHE, ERR_R_INTERNAL_ERROR);
        return 0;
    }

    /*
     * Check ssl_generate_pkey()'s result explicitly rather than relying
     * on ssl_derive() to fail cleanly when handed a NULL key.
     */
    ckey = ssl_generate_pkey(skey);
    if (ckey == NULL || ssl_derive(s, ckey, skey) == 0) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_ECDHE, ERR_R_EVP_LIB);
        goto err;
    }

    /* Generate encoding of client key */
    encoded_pt_len = EVP_PKEY_get1_tls_encodedpoint(ckey, &encodedPoint);

    if (encoded_pt_len == 0) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_ECDHE, ERR_R_EC_LIB);
        goto err;
    }

    EVP_PKEY_free(ckey);
    ckey = NULL;

    *len = encoded_pt_len;

    /* length of encoded point */
    **p = *len;
    *p += 1;
    /* copy the point */
    memcpy(*p, encodedPoint, *len);
    /* increment len to account for length field */
    *len += 1;

    OPENSSL_free(encodedPoint);

    return 1;
 err:
    /* EVP_PKEY_free(NULL) is a no-op, so this is safe for all paths */
    EVP_PKEY_free(ckey);
    return 0;
#else
    SSLerr(SSL_F_TLS_CONSTRUCT_CKE_ECDHE, ERR_R_INTERNAL_ERROR);
    *al = SSL_AD_INTERNAL_ERROR;
    return 0;
#endif
}
/*
 * Build the GOST key exchange part of a ClientKeyExchange: generate a
 * 32-byte session key, derive the shared UKM from the handshake randoms,
 * encrypt the key to the server's certificate key, and emit the result as
 * an ASN.1 SEQUENCE keytransport blob at *p. The plaintext session key is
 * stashed in s->s3->tmp.pms.
 *
 * Returns 1 on success, 0 on error with *al set.
 */
static int tls_construct_cke_gost(SSL *s, unsigned char **p, int *len, int *al)
{
#ifndef OPENSSL_NO_GOST
    /* GOST key exchange message creation */
    EVP_PKEY_CTX *pkey_ctx = NULL;
    X509 *peer_cert;
    size_t msglen;
    unsigned int md_len;
    unsigned char shared_ukm[32], tmp[256];
    EVP_MD_CTX *ukm_hash = NULL;
    int dgst_nid = NID_id_GostR3411_94;
    unsigned char *pms = NULL;
    size_t pmslen = 0;

    /* GOST 2012 suites use the 2012 256-bit digest for the UKM */
    if ((s->s3->tmp.new_cipher->algorithm_auth & SSL_aGOST12) != 0)
        dgst_nid = NID_id_GostR3411_2012_256;

    /*
     * Get server sertificate PKEY and create ctx from it
     */
    peer_cert = s->session->peer;
    if (!peer_cert) {
        *al = SSL_AD_HANDSHAKE_FAILURE;
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_GOST,
               SSL_R_NO_GOST_CERTIFICATE_SENT_BY_PEER);
        return 0;
    }

    pkey_ctx = EVP_PKEY_CTX_new(X509_get0_pubkey(peer_cert), NULL);
    if (pkey_ctx == NULL) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_GOST, ERR_R_MALLOC_FAILURE);
        return 0;
    }
    /*
     * If we have send a certificate, and certificate key
     * parameters match those of server certificate, use
     * certificate key for key exchange
     */

    /* Otherwise, generate ephemeral key pair */
    pmslen = 32;
    pms = OPENSSL_malloc(pmslen);
    if (pms == NULL) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_GOST, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    if (EVP_PKEY_encrypt_init(pkey_ctx) <= 0
        /* Generate session key */
            || RAND_bytes(pms, pmslen) <= 0) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_GOST, ERR_R_INTERNAL_ERROR);
        goto err;
    };
    /*
     * Compute shared IV and store it in algorithm-specific context
     * data
     */
    ukm_hash = EVP_MD_CTX_new();
    if (ukm_hash == NULL
            || EVP_DigestInit(ukm_hash, EVP_get_digestbynid(dgst_nid)) <= 0
            || EVP_DigestUpdate(ukm_hash, s->s3->client_random,
                                SSL3_RANDOM_SIZE) <= 0
            || EVP_DigestUpdate(ukm_hash, s->s3->server_random,
                                SSL3_RANDOM_SIZE) <= 0
            || EVP_DigestFinal_ex(ukm_hash, shared_ukm, &md_len) <= 0) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_GOST, ERR_R_INTERNAL_ERROR);
        goto err;
    }
    EVP_MD_CTX_free(ukm_hash);
    ukm_hash = NULL;

    /* Only the first 8 bytes of the digest are used as the UKM/IV */
    if (EVP_PKEY_CTX_ctrl(pkey_ctx, -1, EVP_PKEY_OP_ENCRYPT,
                          EVP_PKEY_CTRL_SET_IV, 8, shared_ukm) < 0) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_GOST, SSL_R_LIBRARY_BUG);
        goto err;
    }
    /* Make GOST keytransport blob message */
    /*
     * Encapsulate it into sequence
     */
    *((*p)++) = V_ASN1_SEQUENCE | V_ASN1_CONSTRUCTED;
    msglen = 255;
    if (EVP_PKEY_encrypt(pkey_ctx, tmp, &msglen, pms, pmslen) <= 0) {
        *al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_GOST, SSL_R_LIBRARY_BUG);
        goto err;
    }
    /* Emit a DER length: long form (0x81) for lengths >= 0x80 */
    if (msglen >= 0x80) {
        *((*p)++) = 0x81;
        *((*p)++) = msglen & 0xff;
        *len = msglen + 3;
    } else {
        *((*p)++) = msglen & 0xff;
        *len = msglen + 2;
    }
    memcpy(*p, tmp, msglen);

    EVP_PKEY_CTX_free(pkey_ctx);
    s->s3->tmp.pms = pms;
    s->s3->tmp.pmslen = pmslen;

    return 1;
 err:
    EVP_PKEY_CTX_free(pkey_ctx);
    OPENSSL_clear_free(pms, pmslen);
    EVP_MD_CTX_free(ukm_hash);
    return 0;
#else
    SSLerr(SSL_F_TLS_CONSTRUCT_CKE_GOST, ERR_R_INTERNAL_ERROR);
    *al = SSL_AD_INTERNAL_ERROR;
    return 0;
#endif
}
/*
 * Construct the body of an SRP ClientKeyExchange message: a 2-byte
 * length followed by the big-endian encoding of the client's SRP
 * public value A.  Also records the SRP login name in the session.
 *
 * On success returns 1 with *len set to the number of body bytes
 * written at *p.  Returns 0 on error; *al receives an alert value only
 * in the SRP-disabled build (the enabled path relies on SSLerr).
 */
static int tls_construct_cke_srp(SSL *s, unsigned char **p, int *len, int *al)
{
#ifndef OPENSSL_NO_SRP
    if (s->srp_ctx.A != NULL) {
        /* send off the data */
        *len = BN_num_bytes(s->srp_ctx.A);
        s2n(*len, *p);          /* writes 2-byte length prefix, advances *p */
        BN_bn2bin(s->srp_ctx.A, *p);
        *len += 2;              /* account for the length prefix */
    } else {
        /* SRP state is missing the public value A: internal error */
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_SRP, ERR_R_INTERNAL_ERROR);
        return 0;
    }
    /* Record the SRP user name in the session, replacing any prior value */
    OPENSSL_free(s->session->srp_username);
    s->session->srp_username = OPENSSL_strdup(s->srp_ctx.login);
    if (s->session->srp_username == NULL) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CKE_SRP, ERR_R_MALLOC_FAILURE);
        return 0;
    }
    return 1;
#else
    /* SRP compiled out: reaching this function is an internal error */
    SSLerr(SSL_F_TLS_CONSTRUCT_CKE_SRP, ERR_R_INTERNAL_ERROR);
    *al = SSL_AD_INTERNAL_ERROR;
    return 0;
#endif
}
/*
 * Assemble the ClientKeyExchange message body for the negotiated
 * key-exchange algorithm and install the handshake header.
 *
 * Returns 1 on success.  On failure sends a fatal alert (the code
 * chosen by the helper, if it set one), scrubs any pending
 * premaster/PSK secrets and marks the state machine as errored.
 */
int tls_construct_client_key_exchange(SSL *s)
{
    unsigned char *p;
    int len;
    size_t pskhdrlen = 0;
    unsigned long alg_k;
    int al = -1;                /* -1 = no alert chosen by a helper yet */
    alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
    p = ssl_handshake_start(s);
    /* PSK-style suites prepend a PSK identity before the main payload */
    if ((alg_k & SSL_PSK)
        && !tls_construct_cke_psk_preamble(s, &p, &pskhdrlen, &al))
        goto err;
    if (alg_k & SSL_kPSK) {
        len = 0;                /* plain PSK: the preamble is the whole body */
    } else if (alg_k & (SSL_kRSA | SSL_kRSAPSK)) {
        if (!tls_construct_cke_rsa(s, &p, &len, &al))
            goto err;
    } else if (alg_k & (SSL_kDHE | SSL_kDHEPSK)) {
        if (!tls_construct_cke_dhe(s, &p, &len, &al))
            goto err;
    } else if (alg_k & (SSL_kECDHE | SSL_kECDHEPSK)) {
        if (!tls_construct_cke_ecdhe(s, &p, &len, &al))
            goto err;
    } else if (alg_k & SSL_kGOST) {
        if (!tls_construct_cke_gost(s, &p, &len, &al))
            goto err;
    } else if (alg_k & SSL_kSRP) {
        if (!tls_construct_cke_srp(s, &p, &len, &al))
            goto err;
    } else {
        /* No recognized key-exchange algorithm: internal error */
        ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
        SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
        goto err;
    }
    len += pskhdrlen;
    if (!ssl_set_handshake_header(s, SSL3_MT_CLIENT_KEY_EXCHANGE, len)) {
        ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_HANDSHAKE_FAILURE);
        SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_KEY_EXCHANGE, ERR_R_INTERNAL_ERROR);
        goto err;
    }
    return 1;
 err:
    if (al != -1)
        ssl3_send_alert(s, SSL3_AL_FATAL, al);
    /* Scrub any partially-established secrets before failing */
    OPENSSL_clear_free(s->s3->tmp.pms, s->s3->tmp.pmslen);
    s->s3->tmp.pms = NULL;
#ifndef OPENSSL_NO_PSK
    OPENSSL_clear_free(s->s3->tmp.psk, s->s3->tmp.psklen);
    s->s3->tmp.psk = NULL;
#endif
    ossl_statem_set_error(s);
    return 0;
}
/*
 * Post-processing after the ClientKeyExchange message has been built:
 * derive the master secret from the stashed premaster secret (or via
 * SRP), and for DTLS/SCTP export and install the SCTP-AUTH key.
 *
 * Returns 1 on success, 0 on failure.  The premaster secret in
 * s->s3->tmp.pms is consumed (freed) in all cases.
 */
int tls_client_key_exchange_post_work(SSL *s)
{
    unsigned char *pms = NULL;
    size_t pmslen = 0;
    pms = s->s3->tmp.pms;
    pmslen = s->s3->tmp.pmslen;
#ifndef OPENSSL_NO_SRP
    /* Check for SRP */
    if (s->s3->tmp.new_cipher->algorithm_mkey & SSL_kSRP) {
        /* SRP derives the master secret itself; no pms involved */
        if (!srp_generate_client_master_secret(s)) {
            SSLerr(SSL_F_TLS_CLIENT_KEY_EXCHANGE_POST_WORK,
                   ERR_R_INTERNAL_ERROR);
            goto err;
        }
        return 1;
    }
#endif
    /* Every non-PSK key exchange must have produced a premaster secret */
    if (pms == NULL && !(s->s3->tmp.new_cipher->algorithm_mkey & SSL_kPSK)) {
        ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
        SSLerr(SSL_F_TLS_CLIENT_KEY_EXCHANGE_POST_WORK, ERR_R_MALLOC_FAILURE);
        goto err;
    }
    if (!ssl_generate_master_secret(s, pms, pmslen, 1)) {
        ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
        SSLerr(SSL_F_TLS_CLIENT_KEY_EXCHANGE_POST_WORK, ERR_R_INTERNAL_ERROR);
        /* ssl_generate_master_secret frees the pms even on error */
        pms = NULL;
        pmslen = 0;
        goto err;
    }
    /* Ownership of pms passed to ssl_generate_master_secret */
    pms = NULL;
    pmslen = 0;
#ifndef OPENSSL_NO_SCTP
    if (SSL_IS_DTLS(s)) {
        unsigned char sctpauthkey[64];
        char labelbuffer[sizeof(DTLS1_SCTP_AUTH_LABEL)];
        /*
         * Add new shared key for SCTP-Auth, will be ignored if no SCTP
         * used.
         */
        memcpy(labelbuffer, DTLS1_SCTP_AUTH_LABEL,
               sizeof(DTLS1_SCTP_AUTH_LABEL));
        if (SSL_export_keying_material(s, sctpauthkey,
                                       sizeof(sctpauthkey), labelbuffer,
                                       sizeof(labelbuffer), NULL, 0, 0) <= 0)
            goto err;
        BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_ADD_AUTH_KEY,
                 sizeof(sctpauthkey), sctpauthkey);
    }
#endif
    return 1;
 err:
    OPENSSL_clear_free(pms, pmslen);
    s->s3->tmp.pms = NULL;
    return 0;
}
/*
 * Build the CertificateVerify message: a signature over the buffered
 * handshake transcript using the client certificate's private key.
 * With TLS 1.2 signature algorithms a 2-byte sig/hash-algorithm prefix
 * is emitted first; the signature is always preceded by its own 2-byte
 * length.
 *
 * Returns 1 on success, 0 on failure (error queued via SSLerr).
 */
int tls_construct_client_verify(SSL *s)
{
    unsigned char *p;
    EVP_PKEY *pkey;
    /* Digest previously selected for the certificate in use */
    const EVP_MD *md = s->s3->tmp.md[s->cert->key - s->cert->pkeys];
    EVP_MD_CTX *mctx;
    unsigned u = 0;             /* signature length from EVP_SignFinal */
    unsigned long n = 0;        /* total message body length */
    long hdatalen = 0;
    void *hdata;
    mctx = EVP_MD_CTX_new();
    if (mctx == NULL) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_VERIFY, ERR_R_MALLOC_FAILURE);
        goto err;
    }
    p = ssl_handshake_start(s);
    pkey = s->cert->key->privatekey;
    /* The data to sign is the buffered handshake transcript */
    hdatalen = BIO_get_mem_data(s->s3->handshake_buffer, &hdata);
    if (hdatalen <= 0) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_VERIFY, ERR_R_INTERNAL_ERROR);
        goto err;
    }
    if (SSL_USE_SIGALGS(s)) {
        /* TLS >= 1.2: emit the 2-byte signature algorithm first */
        if (!tls12_get_sigandhash(p, pkey, md)) {
            SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_VERIFY, ERR_R_INTERNAL_ERROR);
            goto err;
        }
        p += 2;
        n = 2;
    }
#ifdef SSL_DEBUG
    fprintf(stderr, "Using client alg %s\n", EVP_MD_name(md));
#endif
    /*
     * Sign the transcript; the SSLv3 variant additionally mixes in the
     * master secret via EVP_CTRL_SSL3_MASTER_SECRET.  The signature is
     * written at p + 2, leaving room for its 2-byte length prefix.
     */
    if (!EVP_SignInit_ex(mctx, md, NULL)
        || !EVP_SignUpdate(mctx, hdata, hdatalen)
        || (s->version == SSL3_VERSION
            && !EVP_MD_CTX_ctrl(mctx, EVP_CTRL_SSL3_MASTER_SECRET,
                                s->session->master_key_length,
                                s->session->master_key))
        || !EVP_SignFinal(mctx, p + 2, &u, pkey)) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_VERIFY, ERR_R_EVP_LIB);
        goto err;
    }
#ifndef OPENSSL_NO_GOST
    {
        /* GOST signatures go on the wire with reversed byte order */
        int pktype = EVP_PKEY_id(pkey);
        if (pktype == NID_id_GostR3410_2001
            || pktype == NID_id_GostR3410_2012_256
            || pktype == NID_id_GostR3410_2012_512)
            BUF_reverse(p + 2, NULL, u);
    }
#endif
    s2n(u, p);
    n += u + 2;
    /* Digest cached records and discard handshake buffer */
    if (!ssl3_digest_cached_records(s, 0))
        goto err;
    if (!ssl_set_handshake_header(s, SSL3_MT_CERTIFICATE_VERIFY, n)) {
        SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_VERIFY, ERR_R_INTERNAL_ERROR);
        goto err;
    }
    EVP_MD_CTX_free(mctx);
    return 1;
 err:
    EVP_MD_CTX_free(mctx);
    return 0;
}
/*
* Check a certificate can be used for client authentication. Currently check
* cert exists, if we have a suitable digest for TLS 1.2 if static DH client
* certificates can be used and optionally checks suitability for Suite B.
*/
/*
 * Decide whether the configured client certificate is usable for
 * client authentication: the certificate and private key must both be
 * present, a signing digest must have been selected when TLS 1.2
 * signature algorithms are in use, and (in strict mode) the chain must
 * pass tls1_check_chain(), which also adjusts the Suite B digest.
 *
 * Returns 1 if the certificate can be used, 0 otherwise.
 */
static int ssl3_check_client_certificate(SSL *s)
{
    if (s->cert == NULL)
        return 0;
    if (s->cert->key->x509 == NULL || s->cert->key->privatekey == NULL)
        return 0;
    /* If no suitable signature algorithm can't use certificate */
    if (SSL_USE_SIGALGS(s)
        && s->s3->tmp.md[s->cert->key - s->cert->pkeys] == NULL)
        return 0;
    /*
     * If strict mode check suitability of chain before using it. This also
     * adjusts suite B digest if necessary.
     */
    if ((s->cert->cert_flags & SSL_CERT_FLAGS_CHECK_TLS_STRICT) != 0
        && tls1_check_chain(s, NULL, NULL, NULL, -2) == 0)
        return 0;
    return 1;
}
/*
 * Obtain the client certificate needed to answer a CertificateRequest.
 *
 * WORK_MORE_A: give the application cert_cb a chance to install a
 * certificate (it may ask to be retried via SSL_X509_LOOKUP).
 * WORK_MORE_B: fall back to the client-cert callback / ENGINE lookup.
 *
 * Failure to find a usable certificate is not fatal: an SSLv3 client
 * sends a no-certificate warning alert, a TLS client marks cert_req=2
 * so that an empty certificate list is sent instead.
 */
WORK_STATE tls_prepare_client_certificate(SSL *s, WORK_STATE wst)
{
    X509 *x509 = NULL;
    EVP_PKEY *pkey = NULL;
    int i;
    if (wst == WORK_MORE_A) {
        /* Let cert callback update client certificates if required */
        if (s->cert->cert_cb) {
            i = s->cert->cert_cb(s, s->cert->cert_cb_arg);
            if (i < 0) {
                /* Callback wants to be retried later */
                s->rwstate = SSL_X509_LOOKUP;
                return WORK_MORE_A;
            }
            if (i == 0) {
                ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
                ossl_statem_set_error(s);
                return 0;
            }
            s->rwstate = SSL_NOTHING;
        }
        if (ssl3_check_client_certificate(s))
            return WORK_FINISHED_CONTINUE;
        /* Fall through to WORK_MORE_B */
        wst = WORK_MORE_B;
    }
    /* We need to get a client cert */
    if (wst == WORK_MORE_B) {
        /*
         * If we get an error, we need to ssl->rwstate=SSL_X509_LOOKUP;
         * return(-1); We then get retied later
         */
        i = ssl_do_client_cert_cb(s, &x509, &pkey);
        if (i < 0) {
            s->rwstate = SSL_X509_LOOKUP;
            return WORK_MORE_B;
        }
        s->rwstate = SSL_NOTHING;
        if ((i == 1) && (pkey != NULL) && (x509 != NULL)) {
            /* Install the pair; on failure treat as "no certificate" */
            if (!SSL_use_certificate(s, x509) || !SSL_use_PrivateKey(s, pkey))
                i = 0;
        } else if (i == 1) {
            /* Callback claimed success but returned an incomplete pair */
            i = 0;
            SSLerr(SSL_F_TLS_PREPARE_CLIENT_CERTIFICATE,
                   SSL_R_BAD_DATA_RETURNED_BY_CALLBACK);
        }
        /* SSL_use_* take their own references */
        X509_free(x509);
        EVP_PKEY_free(pkey);
        if (i && !ssl3_check_client_certificate(s))
            i = 0;
        if (i == 0) {
            if (s->version == SSL3_VERSION) {
                /* SSLv3: signal "no certificate" with a warning alert */
                s->s3->tmp.cert_req = 0;
                ssl3_send_alert(s, SSL3_AL_WARNING, SSL_AD_NO_CERTIFICATE);
                return WORK_FINISHED_CONTINUE;
            } else {
                /* TLS: send an empty certificate list instead */
                s->s3->tmp.cert_req = 2;
                if (!ssl3_digest_cached_records(s, 0)) {
                    ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
                    ossl_statem_set_error(s);
                    return 0;
                }
            }
        }
        return WORK_FINISHED_CONTINUE;
    }
    /* Shouldn't ever get here */
    return WORK_ERROR;
}
/*
 * Build the client Certificate message.  When cert_req == 2 (no usable
 * certificate was found) a NULL key is passed so that an empty
 * certificate list is emitted.  Returns 1 on success; on failure sends
 * a fatal internal-error alert and marks the state machine as errored.
 */
int tls_construct_client_certificate(SSL *s)
{
    if (ssl3_output_cert_chain(s, s->s3->tmp.cert_req == 2
                                  ? NULL : s->cert->key))
        return 1;
    SSLerr(SSL_F_TLS_CONSTRUCT_CLIENT_CERTIFICATE, ERR_R_INTERNAL_ERROR);
    ssl3_send_alert(s, SSL3_AL_FATAL, SSL_AD_INTERNAL_ERROR);
    ossl_statem_set_error(s);
    return 0;
}
#define has_bits(i,m) (((i)&(m)) == (m))
/*
 * Sanity-check that the server's certificate (if the ciphersuite uses
 * one) is compatible with the negotiated key-exchange and
 * authentication algorithms, and that ephemeral DH parameters were
 * received when required.
 *
 * Returns 1 on success; on failure sends a fatal alert and returns 0.
 */
int ssl3_check_cert_and_algorithm(SSL *s)
{
    int i;
#ifndef OPENSSL_NO_EC
    int idx;
#endif
    long alg_k, alg_a;
    EVP_PKEY *pkey = NULL;
    int al = SSL_AD_HANDSHAKE_FAILURE;
    alg_k = s->s3->tmp.new_cipher->algorithm_mkey;
    alg_a = s->s3->tmp.new_cipher->algorithm_auth;
    /*
     * We don't have a certificate: anonymous, plain PSK and plain SRP
     * suites authenticate without one, so there is nothing to check.
     * (SSL_aSRP was previously missing here, so certificate-less SRP
     * handshakes fell through to the dereferences below.)
     */
    if ((alg_a & (SSL_aNULL | SSL_aSRP)) || (alg_k & SSL_kPSK))
        return (1);
    /*
     * Everything below inspects the peer certificate.  Guard against a
     * NULL peer (e.g. a misbehaving server that omitted its Certificate
     * message) which would otherwise be dereferenced below (CWE-476).
     */
    if (s->session->peer == NULL) {
        al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, ERR_R_INTERNAL_ERROR);
        goto f_err;
    }
    /* This is the passed certificate */
#ifndef OPENSSL_NO_EC
    idx = s->session->peer_type;
    if (idx == SSL_PKEY_ECC) {
        if (ssl_check_srvr_ecc_cert_and_alg(s->session->peer, s) == 0) {
            /* check failed */
            SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, SSL_R_BAD_ECC_CERT);
            goto f_err;
        } else {
            return 1;
        }
    } else if (alg_a & SSL_aECDSA) {
        SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,
               SSL_R_MISSING_ECDSA_SIGNING_CERT);
        goto f_err;
    }
#endif
    pkey = X509_get0_pubkey(s->session->peer);
    i = X509_certificate_type(s->session->peer, pkey);
    /* Check that we have a certificate if we require one */
    if ((alg_a & SSL_aRSA) && !has_bits(i, EVP_PK_RSA | EVP_PKT_SIGN)) {
        SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,
               SSL_R_MISSING_RSA_SIGNING_CERT);
        goto f_err;
    }
#ifndef OPENSSL_NO_DSA
    else if ((alg_a & SSL_aDSS) && !has_bits(i, EVP_PK_DSA | EVP_PKT_SIGN)) {
        SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,
               SSL_R_MISSING_DSA_SIGNING_CERT);
        goto f_err;
    }
#endif
#ifndef OPENSSL_NO_RSA
    if (alg_k & (SSL_kRSA | SSL_kRSAPSK) &&
        !has_bits(i, EVP_PK_RSA | EVP_PKT_ENC)) {
        SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM,
               SSL_R_MISSING_RSA_ENCRYPTING_CERT);
        goto f_err;
    }
#endif
#ifndef OPENSSL_NO_DH
    /* Ephemeral DH suites must have received server key exchange params */
    if ((alg_k & SSL_kDHE) && (s->s3->peer_tmp == NULL)) {
        al = SSL_AD_INTERNAL_ERROR;
        SSLerr(SSL_F_SSL3_CHECK_CERT_AND_ALGORITHM, ERR_R_INTERNAL_ERROR);
        goto f_err;
    }
#endif
    return (1);
 f_err:
    ssl3_send_alert(s, SSL3_AL_FATAL, al);
    return (0);
}
#ifndef OPENSSL_NO_NEXTPROTONEG
/*
 * Construct a NextProtocol (NPN) handshake message directly into
 * s->init_buf.  Wire layout: 1-byte message type, 3-byte length,
 * 1-byte selected-protocol length, protocol bytes, 1-byte padding
 * length, zero padding sized so the body is a multiple of 32 bytes.
 * Always returns 1.
 */
int tls_construct_next_proto(SSL *s)
{
    unsigned int len, padding_len;
    unsigned char *d;
    len = s->next_proto_negotiated_len;
    /* Pad (len byte + padding-len byte + data) up to a 32-byte boundary */
    padding_len = 32 - ((len + 2) % 32);
    d = (unsigned char *)s->init_buf->data;
    /* Body starts at offset 4, after the 4-byte handshake header */
    d[4] = len;
    memcpy(d + 5, s->next_proto_negotiated, len);
    d[5 + len] = padding_len;
    memset(d + 6 + len, 0, padding_len);
    /* Now write the handshake header: type plus 3-byte body length */
    *(d++) = SSL3_MT_NEXT_PROTO;
    l2n3(2 + len + padding_len, d);
    s->init_num = 4 + 2 + len + padding_len;
    s->init_off = 0;
    return 1;
}
#endif
/*
 * Look up a client certificate/private-key pair for this connection:
 * first via the configured client-cert ENGINE (if any), then via the
 * application's client_cert_cb.  Returns the callback result:
 * > 0 success (*px509/*ppkey filled in), 0 no certificate available,
 * < 0 caller should retry later.
 */
int ssl_do_client_cert_cb(SSL *s, X509 **px509, EVP_PKEY **ppkey)
{
    int i = 0;
#ifndef OPENSSL_NO_ENGINE
    if (s->ctx->client_cert_engine) {
        i = ENGINE_load_ssl_client_cert(s->ctx->client_cert_engine, s,
                                        SSL_get_client_CA_list(s),
                                        px509, ppkey, NULL, NULL, NULL);
        /* A definite answer from the engine short-circuits the callback */
        if (i != 0)
            return i;
    }
#endif
    if (s->ctx->client_cert_cb)
        i = s->ctx->client_cert_cb(s, px509, ppkey);
    return i;
}
/*
 * Serialize the enabled ciphers from |sk| into ClientHello wire format
 * at |p|, skipping ciphers disabled for this session.  On initial
 * handshakes the empty renegotiation-info SCSV is appended, and the
 * fallback SCSV is appended when SSL_MODE_SEND_FALLBACK_SCSV is set.
 *
 * Returns the number of bytes written; 0 means no usable cipher and is
 * treated as an error by the caller.
 */
int ssl_cipher_list_to_bytes(SSL *s, STACK_OF(SSL_CIPHER) *sk, unsigned char *p)
{
    int i, j = 0;
    const SSL_CIPHER *c;
    unsigned char *q;
    /* The reneg-info SCSV is only sent on an initial handshake */
    int empty_reneg_info_scsv = !s->renegotiate;
    /* Set disabled masks for this session */
    ssl_set_client_disabled(s);
    if (sk == NULL)
        return (0);
    q = p;
    for (i = 0; i < sk_SSL_CIPHER_num(sk); i++) {
        c = sk_SSL_CIPHER_value(sk, i);
        /* Skip disabled ciphers */
        if (ssl_cipher_disabled(s, c, SSL_SECOP_CIPHER_SUPPORTED))
            continue;
        j = s->method->put_cipher_by_char(c, p);
        p += j;
    }
    /*
     * If p == q, no ciphers; caller indicates an error. Otherwise, add
     * applicable SCSVs.
     */
    if (p != q) {
        if (empty_reneg_info_scsv) {
            static SSL_CIPHER scsv = {
                0, NULL, SSL3_CK_SCSV, 0, 0, 0, 0, 0, 0, 0, 0, 0
            };
            j = s->method->put_cipher_by_char(&scsv, p);
            p += j;
        }
        if (s->mode & SSL_MODE_SEND_FALLBACK_SCSV) {
            static SSL_CIPHER scsv = {
                0, NULL, SSL3_CK_FALLBACK_SCSV, 0, 0, 0, 0, 0, 0, 0, 0, 0
            };
            j = s->method->put_cipher_by_char(&scsv, p);
            p += j;
        }
    }
    return (p - q);
}
/*
* Copyright (C) 2006,2008 by the Massachusetts Institute of Technology.
* All rights reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* A module that implements the spnego security mechanism.
* It is used to negotiate the security mechanism between
* peers using the GSS-API. SPNEGO is specified in RFC 4178.
*
*/
/*
* Copyright (c) 2006-2008, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* #pragma ident "@(#)spnego_mech.c 1.7 04/09/28 SMI" */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <k5-int.h>
#include <krb5.h>
#include <mglueP.h>
#include "gssapiP_spnego.h"
#include <gssapi_err_generic.h>
#undef g_token_size
#undef g_verify_token_header
#undef g_make_token_header
#define HARD_ERROR(v) ((v) != GSS_S_COMPLETE && (v) != GSS_S_CONTINUE_NEEDED)
typedef const gss_OID_desc *gss_OID_const;
/* der routines defined in libgss */
extern unsigned int gssint_der_length_size(unsigned int);
extern int gssint_get_der_length(unsigned char **, unsigned int,
unsigned int*);
extern int gssint_put_der_length(unsigned int, unsigned char **, unsigned int);
/* private routines for spnego_mechanism */
static spnego_token_t make_spnego_token(const char *);
static gss_buffer_desc make_err_msg(const char *);
static int g_token_size(gss_OID_const, unsigned int);
static int g_make_token_header(gss_OID_const, unsigned int,
unsigned char **, unsigned int);
static int g_verify_token_header(gss_OID_const, unsigned int *,
unsigned char **,
int, unsigned int);
static int g_verify_neg_token_init(unsigned char **, unsigned int);
static gss_OID get_mech_oid(OM_uint32 *, unsigned char **, size_t);
static gss_buffer_t get_input_token(unsigned char **, unsigned int);
static gss_OID_set get_mech_set(OM_uint32 *, unsigned char **, unsigned int);
static OM_uint32 get_req_flags(unsigned char **, OM_uint32, OM_uint32 *);
static OM_uint32 get_available_mechs(OM_uint32 *, gss_name_t, gss_cred_usage_t,
gss_const_key_value_set_t,
gss_cred_id_t *, gss_OID_set *);
static OM_uint32 get_negotiable_mechs(OM_uint32 *, spnego_gss_cred_id_t,
gss_cred_usage_t, gss_OID_set *);
static void release_spnego_ctx(spnego_gss_ctx_id_t *);
static void check_spnego_options(spnego_gss_ctx_id_t);
static spnego_gss_ctx_id_t create_spnego_ctx(void);
static int put_mech_set(gss_OID_set mechSet, gss_buffer_t buf);
static int put_input_token(unsigned char **, gss_buffer_t, unsigned int);
static int put_mech_oid(unsigned char **, gss_OID_const, unsigned int);
static int put_negResult(unsigned char **, OM_uint32, unsigned int);
static OM_uint32
process_mic(OM_uint32 *, gss_buffer_t, spnego_gss_ctx_id_t,
gss_buffer_t *, OM_uint32 *, send_token_flag *);
static OM_uint32
handle_mic(OM_uint32 *, gss_buffer_t, int, spnego_gss_ctx_id_t,
gss_buffer_t *, OM_uint32 *, send_token_flag *);
static OM_uint32
init_ctx_new(OM_uint32 *, spnego_gss_cred_id_t, gss_ctx_id_t *,
send_token_flag *);
static OM_uint32
init_ctx_nego(OM_uint32 *, spnego_gss_ctx_id_t, OM_uint32, gss_OID,
gss_buffer_t *, gss_buffer_t *,
OM_uint32 *, send_token_flag *);
static OM_uint32
init_ctx_cont(OM_uint32 *, gss_ctx_id_t *, gss_buffer_t,
gss_buffer_t *, gss_buffer_t *,
OM_uint32 *, send_token_flag *);
static OM_uint32
init_ctx_reselect(OM_uint32 *, spnego_gss_ctx_id_t, OM_uint32,
gss_OID, gss_buffer_t *, gss_buffer_t *,
OM_uint32 *, send_token_flag *);
static OM_uint32
init_ctx_call_init(OM_uint32 *, spnego_gss_ctx_id_t, spnego_gss_cred_id_t,
gss_name_t, OM_uint32, OM_uint32, gss_buffer_t,
gss_OID *, gss_buffer_t, OM_uint32 *, OM_uint32 *,
OM_uint32 *, send_token_flag *);
static OM_uint32
acc_ctx_new(OM_uint32 *, gss_buffer_t, gss_ctx_id_t *,
spnego_gss_cred_id_t, gss_buffer_t *,
gss_buffer_t *, OM_uint32 *, send_token_flag *);
static OM_uint32
acc_ctx_cont(OM_uint32 *, gss_buffer_t, gss_ctx_id_t *,
gss_buffer_t *, gss_buffer_t *,
OM_uint32 *, send_token_flag *);
static OM_uint32
acc_ctx_vfy_oid(OM_uint32 *, spnego_gss_ctx_id_t, gss_OID,
OM_uint32 *, send_token_flag *);
static OM_uint32
acc_ctx_call_acc(OM_uint32 *, spnego_gss_ctx_id_t, spnego_gss_cred_id_t,
gss_buffer_t, gss_OID *, gss_buffer_t,
OM_uint32 *, OM_uint32 *, gss_cred_id_t *,
OM_uint32 *, send_token_flag *);
static gss_OID
negotiate_mech(gss_OID_set, gss_OID_set, OM_uint32 *);
static int
g_get_tag_and_length(unsigned char **, int, unsigned int, unsigned int *);
static int
make_spnego_tokenInit_msg(spnego_gss_ctx_id_t,
int,
gss_buffer_t,
OM_uint32, gss_buffer_t, send_token_flag,
gss_buffer_t);
static int
make_spnego_tokenTarg_msg(OM_uint32, gss_OID, gss_buffer_t,
gss_buffer_t, send_token_flag,
gss_buffer_t);
static OM_uint32
get_negTokenInit(OM_uint32 *, gss_buffer_t, gss_buffer_t,
gss_OID_set *, OM_uint32 *, gss_buffer_t *,
gss_buffer_t *);
static OM_uint32
get_negTokenResp(OM_uint32 *, unsigned char *, unsigned int,
OM_uint32 *, gss_OID *, gss_buffer_t *, gss_buffer_t *);
static int
is_kerb_mech(gss_OID oid);
/* SPNEGO oid structure */
static const gss_OID_desc spnego_oids[] = {
	{SPNEGO_OID_LENGTH, SPNEGO_OID},
};

/* Public handle for the SPNEGO mechanism OID (1.3.6.1.5.5.2). */
const gss_OID_desc * const gss_mech_spnego = spnego_oids+0;
static const gss_OID_set_desc spnego_oidsets[] = {
	{1, (gss_OID) spnego_oids+0},
};
/* Singleton OID set containing only the SPNEGO mechanism OID. */
const gss_OID_set_desc * const gss_mech_set_spnego = spnego_oidsets+0;
static int make_NegHints(OM_uint32 *, spnego_gss_cred_id_t, gss_buffer_t *);
static int put_neg_hints(unsigned char **, gss_buffer_t, unsigned int);
static OM_uint32
acc_ctx_hints(OM_uint32 *, gss_ctx_id_t *, spnego_gss_cred_id_t,
gss_buffer_t *, OM_uint32 *, send_token_flag *);
/*
* The Mech OID for SPNEGO:
* { iso(1) org(3) dod(6) internet(1) security(5)
* mechanism(5) spnego(2) }
*/
/*
 * SPNEGO's GSS-API dispatch table, handed to the mechglue: one entry
 * per GSS operation, NULL where SPNEGO supplies no implementation.
 */
static struct gss_config spnego_mechanism =
{
	{SPNEGO_OID_LENGTH, SPNEGO_OID},
	NULL,
	spnego_gss_acquire_cred,
	spnego_gss_release_cred,
	spnego_gss_init_sec_context,
#ifndef LEAN_CLIENT
	spnego_gss_accept_sec_context,
#else
	NULL,
#endif  /* LEAN_CLIENT */
	NULL,				/* gss_process_context_token */
	spnego_gss_delete_sec_context,	/* gss_delete_sec_context */
	spnego_gss_context_time,	/* gss_context_time */
	spnego_gss_get_mic,		/* gss_get_mic */
	spnego_gss_verify_mic,		/* gss_verify_mic */
	spnego_gss_wrap,		/* gss_wrap */
	spnego_gss_unwrap,		/* gss_unwrap */
	spnego_gss_display_status,
	NULL,				/* gss_indicate_mechs */
	spnego_gss_compare_name,
	spnego_gss_display_name,
	spnego_gss_import_name,
	spnego_gss_release_name,
	spnego_gss_inquire_cred,	/* gss_inquire_cred */
	NULL,				/* gss_add_cred */
#ifndef LEAN_CLIENT
	spnego_gss_export_sec_context,		/* gss_export_sec_context */
	spnego_gss_import_sec_context,		/* gss_import_sec_context */
#else
	NULL,				/* gss_export_sec_context */
	NULL,				/* gss_import_sec_context */
#endif /* LEAN_CLIENT */
	NULL, 				/* gss_inquire_cred_by_mech */
	spnego_gss_inquire_names_for_mech,
	spnego_gss_inquire_context,	/* gss_inquire_context */
	NULL,				/* gss_internal_release_oid */
	spnego_gss_wrap_size_limit,	/* gss_wrap_size_limit */
	NULL,				/* gssd_pname_to_uid */
	NULL,				/* gss_userok */
	NULL,				/* gss_export_name */
	spnego_gss_duplicate_name,	/* gss_duplicate_name */
	NULL,				/* gss_store_cred */
	spnego_gss_inquire_sec_context_by_oid, /* gss_inquire_sec_context_by_oid */
	spnego_gss_inquire_cred_by_oid,	/* gss_inquire_cred_by_oid */
	spnego_gss_set_sec_context_option, /* gss_set_sec_context_option */
	spnego_gss_set_cred_option,	/* gssspi_set_cred_option */
	NULL,				/* gssspi_mech_invoke */
	spnego_gss_wrap_aead,
	spnego_gss_unwrap_aead,
	spnego_gss_wrap_iov,
	spnego_gss_unwrap_iov,
	spnego_gss_wrap_iov_length,
	spnego_gss_complete_auth_token,
	spnego_gss_acquire_cred_impersonate_name,
	NULL,				/* gss_add_cred_impersonate_name */
	spnego_gss_display_name_ext,
	spnego_gss_inquire_name,
	spnego_gss_get_name_attribute,
	spnego_gss_set_name_attribute,
	spnego_gss_delete_name_attribute,
	spnego_gss_export_name_composite,
	spnego_gss_map_name_to_any,
	spnego_gss_release_any_name_mapping,
	spnego_gss_pseudo_random,
	spnego_gss_set_neg_mechs,
	spnego_gss_inquire_saslname_for_mech,
	spnego_gss_inquire_mech_for_saslname,
	spnego_gss_inquire_attrs_for_mech,
	spnego_gss_acquire_cred_from,
	NULL,				/* gss_store_cred_into */
	spnego_gss_acquire_cred_with_password,
	spnego_gss_export_cred,
	spnego_gss_import_cred,
	NULL,				/* gssspi_import_sec_context_by_mech */
	NULL,				/* gssspi_import_name_by_mech */
	NULL,				/* gssspi_import_cred_by_mech */
	spnego_gss_get_mic_iov,
	spnego_gss_verify_mic_iov,
	spnego_gss_get_mic_iov_length
};
#ifdef _GSS_STATIC_LINK
#include "mglueP.h"
/*
 * Register the SPNEGO mechanism table with the statically linked
 * mechglue.  Returns 0 on success or the error code from
 * gssint_register_mechinfo().
 */
static int gss_spnegomechglue_init(void)
{
	struct gss_mech_config mech_spnego;

	memset(&mech_spnego, 0, sizeof(mech_spnego));
	mech_spnego.mech = &spnego_mechanism;
	mech_spnego.mechNameStr = "spnego";
	mech_spnego.mech_type = GSS_C_NO_OID;
	return gssint_register_mechinfo(&mech_spnego);
}
#else
/* Dynamic-load entry point: expose the SPNEGO dispatch table. */
gss_mechanism KRB5_CALLCONV
gss_mech_initialize(void)
{
	return (&spnego_mechanism);
}
MAKE_INIT_FUNCTION(gss_krb5int_lib_init);
MAKE_FINI_FUNCTION(gss_krb5int_lib_fini);
int gss_krb5int_lib_init(void);
#endif /* _GSS_STATIC_LINK */
/*
 * Library initializer: register the K5_KEY_GSS_SPNEGO_STATUS key (and,
 * for static builds, register the mechanism with the mechglue).
 * Returns 0 on success.
 */
int gss_spnegoint_lib_init(void)
{
	int err;

	err = k5_key_register(K5_KEY_GSS_SPNEGO_STATUS, NULL);
	if (err)
		return err;
#ifdef _GSS_STATIC_LINK
	return gss_spnegomechglue_init();
#else
	return 0;
#endif
}
/* Library finalizer: nothing to release at present. */
void gss_spnegoint_lib_fini(void)
{
}
/*ARGSUSED*/
/*
 * Acquire SPNEGO credentials.  Thin wrapper that forwards to
 * spnego_gss_acquire_cred_from() with no credential store.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_acquire_cred(OM_uint32 *minor_status,
			gss_name_t desired_name,
			OM_uint32 time_req,
			gss_OID_set desired_mechs,
			gss_cred_usage_t cred_usage,
			gss_cred_id_t *output_cred_handle,
			gss_OID_set *actual_mechs,
			OM_uint32 *time_rec)
{
	return spnego_gss_acquire_cred_from(minor_status, desired_name,
					    time_req, desired_mechs,
					    cred_usage, NULL,
					    output_cred_handle, actual_mechs,
					    time_rec);
}
/*ARGSUSED*/
/*
 * Acquire credentials usable for SPNEGO negotiation from an optional
 * credential store.  A mechglue credential covering all available
 * mechs is obtained and wrapped in a spnego_gss_cred_id_rec so that a
 * restricted neg_mechs set can be attached later (see
 * spnego_gss_set_neg_mechs).  Note that desired_mechs is not consulted
 * here; get_available_mechs() determines the mech set.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_acquire_cred_from(OM_uint32 *minor_status,
			     const gss_name_t desired_name,
			     OM_uint32 time_req,
			     const gss_OID_set desired_mechs,
			     gss_cred_usage_t cred_usage,
			     gss_const_key_value_set_t cred_store,
			     gss_cred_id_t *output_cred_handle,
			     gss_OID_set *actual_mechs,
			     OM_uint32 *time_rec)
{
	OM_uint32 status, tmpmin;
	gss_OID_set amechs;
	gss_cred_id_t mcred = NULL;
	spnego_gss_cred_id_t spcred = NULL;

	dsyslog("Entering spnego_gss_acquire_cred\n");
	if (actual_mechs)
		*actual_mechs = NULL;
	if (time_rec)
		*time_rec = 0;
	/* We will obtain a mechglue credential and wrap it in a
	 * spnego_gss_cred_id_rec structure.  Allocate the wrapper. */
	spcred = malloc(sizeof(spnego_gss_cred_id_rec));
	if (spcred == NULL) {
		*minor_status = ENOMEM;
		return (GSS_S_FAILURE);
	}
	spcred->neg_mechs = GSS_C_NULL_OID_SET;
	/*
	 * Always use get_available_mechs to collect a list of
	 * mechs for which creds are available.
	 */
	status = get_available_mechs(minor_status, desired_name,
				     cred_usage, cred_store, &mcred,
				     &amechs);
	/* NOTE(review): amechs is released below even on failure; this
	 * assumes get_available_mechs() always initializes it -- confirm. */
	if (actual_mechs && amechs != GSS_C_NULL_OID_SET) {
		(void) gssint_copy_oid_set(&tmpmin, amechs, actual_mechs);
	}
	(void) gss_release_oid_set(&tmpmin, &amechs);
	if (status == GSS_S_COMPLETE) {
		/* Hand the mechglue credential to the wrapper */
		spcred->mcred = mcred;
		*output_cred_handle = (gss_cred_id_t)spcred;
	} else {
		free(spcred);
		*output_cred_handle = GSS_C_NO_CREDENTIAL;
	}
	dsyslog("Leaving spnego_gss_acquire_cred\n");
	return (status);
}
/*ARGSUSED*/
/*
 * Release an SPNEGO credential wrapper: free the negotiable-mech set,
 * the wrapped mechglue credential, and the wrapper itself, leaving
 * *cred_handle set to GSS_C_NO_CREDENTIAL.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_release_cred(OM_uint32 *minor_status,
			gss_cred_id_t *cred_handle)
{
	spnego_gss_cred_id_t cred_wrap;

	dsyslog("Entering spnego_gss_release_cred\n");

	/* Both output parameters must be writable. */
	if (minor_status == NULL || cred_handle == NULL)
		return (GSS_S_CALL_INACCESSIBLE_WRITE);

	*minor_status = 0;

	/* Releasing the null credential is a successful no-op. */
	if (*cred_handle == GSS_C_NO_CREDENTIAL)
		return (GSS_S_COMPLETE);

	/* Detach the wrapper from the caller before tearing it down. */
	cred_wrap = (spnego_gss_cred_id_t)*cred_handle;
	*cred_handle = GSS_C_NO_CREDENTIAL;

	gss_release_oid_set(minor_status, &cred_wrap->neg_mechs);
	gss_release_cred(minor_status, &cred_wrap->mcred);
	free(cred_wrap);

	dsyslog("Leaving spnego_gss_release_cred\n");
	return (GSS_S_COMPLETE);
}
/* Cache the module options configured for the SPNEGO mechanism OID. */
static void
check_spnego_options(spnego_gss_ctx_id_t spnego_ctx)
{
	spnego_ctx->optionStr = gssint_get_modOptions(
		(const gss_OID)&spnego_oids[0]);
}
static spnego_gss_ctx_id_t
create_spnego_ctx(void)
{
spnego_gss_ctx_id_t spnego_ctx = NULL;
spnego_ctx = (spnego_gss_ctx_id_t)
malloc(sizeof (spnego_gss_ctx_id_rec));
if (spnego_ctx == NULL) {
return (NULL);
}
spnego_ctx->magic_num = SPNEGO_MAGIC_ID;
spnego_ctx->ctx_handle = GSS_C_NO_CONTEXT;
spnego_ctx->mech_set = NULL;
spnego_ctx->internal_mech = NULL;
spnego_ctx->optionStr = NULL;
spnego_ctx->DER_mechTypes.length = 0;
spnego_ctx->DER_mechTypes.value = NULL;
spnego_ctx->default_cred = GSS_C_NO_CREDENTIAL;
spnego_ctx->mic_reqd = 0;
spnego_ctx->mic_sent = 0;
spnego_ctx->mic_rcvd = 0;
spnego_ctx->mech_complete = 0;
spnego_ctx->nego_done = 0;
spnego_ctx->internal_name = GSS_C_NO_NAME;
spnego_ctx->actual_mech = GSS_C_NO_OID;
check_spnego_options(spnego_ctx);
return (spnego_ctx);
}
/*
* Both initiator and acceptor call here to verify and/or create mechListMIC,
* and to consistency-check the MIC state. handle_mic is invoked only if the
* negotiated mech has completed and supports MICs.
*/
/*
 * Verify and/or generate the mechListMIC and consistency-check the MIC
 * state (invoked only once the negotiated mech is complete and
 * supports MICs).  mic_in is the peer's MIC (or GSS_C_NO_BUFFER);
 * *mic_out receives a MIC to send, if one is produced.  Updates
 * *negState/*tokflag for the caller's reply.
 */
static OM_uint32
handle_mic(OM_uint32 *minor_status, gss_buffer_t mic_in,
	   int send_mechtok, spnego_gss_ctx_id_t sc,
	   gss_buffer_t *mic_out,
	   OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret;

	ret = GSS_S_FAILURE;
	*mic_out = GSS_C_NO_BUFFER;
	if (mic_in != GSS_C_NO_BUFFER) {
		if (sc->mic_rcvd) {
			/* Reject MIC if we've already received a MIC. */
			*negState = REJECT;
			*tokflag = ERROR_TOKEN_SEND;
			return GSS_S_DEFECTIVE_TOKEN;
		}
	} else if (sc->mic_reqd && !send_mechtok) {
		/*
		 * If the peer sends the final mechanism token, it
		 * must send the MIC with that token if the
		 * negotiation requires MICs.
		 */
		*negState = REJECT;
		*tokflag = ERROR_TOKEN_SEND;
		return GSS_S_DEFECTIVE_TOKEN;
	}
	ret = process_mic(minor_status, mic_in, sc, mic_out,
			  negState, tokflag);
	if (ret != GSS_S_COMPLETE) {
		return ret;
	}
	/* If MICs are required, at least one direction must have completed */
	if (sc->mic_reqd) {
		assert(sc->mic_sent || sc->mic_rcvd);
	}
	if (sc->mic_sent && sc->mic_rcvd) {
		/* MIC exchange finished in both directions */
		ret = GSS_S_COMPLETE;
		*negState = ACCEPT_COMPLETE;
		if (*mic_out == GSS_C_NO_BUFFER) {
			/*
			 * We sent a MIC on the previous pass; we
			 * shouldn't be sending a mechanism token.
			 */
			assert(!send_mechtok);
			*tokflag = NO_TOKEN_SEND;
		} else {
			*tokflag = CONT_TOKEN_SEND;
		}
	} else if (sc->mic_reqd) {
		/* Still waiting for the peer's MIC */
		*negState = ACCEPT_INCOMPLETE;
		ret = GSS_S_CONTINUE_NEEDED;
	} else if (*negState == ACCEPT_COMPLETE) {
		ret = GSS_S_COMPLETE;
	} else {
		ret = GSS_S_CONTINUE_NEEDED;
	}
	return ret;
}
/*
* Perform the actual verification and/or generation of mechListMIC.
*/
/*
 * Perform the actual verification and/or generation of mechListMIC.
 * A received MIC is verified against the DER-encoded mechTypes list;
 * receiving one obligates us to send one back.  On success *mic_out
 * holds a newly generated MIC (heap-allocated) if one was produced.
 */
static OM_uint32
process_mic(OM_uint32 *minor_status, gss_buffer_t mic_in,
	    spnego_gss_ctx_id_t sc, gss_buffer_t *mic_out,
	    OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret, tmpmin;
	gss_qop_t qop_state;
	gss_buffer_desc tmpmic = GSS_C_EMPTY_BUFFER;

	ret = GSS_S_FAILURE;
	if (mic_in != GSS_C_NO_BUFFER) {
		/* Verify the peer's MIC over our DER-encoded mech list */
		ret = gss_verify_mic(minor_status, sc->ctx_handle,
				     &sc->DER_mechTypes,
				     mic_in, &qop_state);
		if (ret != GSS_S_COMPLETE) {
			*negState = REJECT;
			*tokflag = ERROR_TOKEN_SEND;
			return ret;
		}
		/* If we got a MIC, we must send a MIC. */
		sc->mic_reqd = 1;
		sc->mic_rcvd = 1;
	}
	if (sc->mic_reqd && !sc->mic_sent) {
		/* Generate our own MIC over the same mech list */
		ret = gss_get_mic(minor_status, sc->ctx_handle,
				  GSS_C_QOP_DEFAULT,
				  &sc->DER_mechTypes,
				  &tmpmic);
		if (ret != GSS_S_COMPLETE) {
			gss_release_buffer(&tmpmin, &tmpmic);
			*tokflag = NO_TOKEN_SEND;
			return ret;
		}
		/* Transfer the MIC to a heap buffer owned by the caller */
		*mic_out = malloc(sizeof(gss_buffer_desc));
		if (*mic_out == GSS_C_NO_BUFFER) {
			gss_release_buffer(&tmpmin, &tmpmic);
			*tokflag = NO_TOKEN_SEND;
			return GSS_S_FAILURE;
		}
		**mic_out = tmpmic;
		sc->mic_sent = 1;
	}
	return GSS_S_COMPLETE;
}
/*
* Initial call to spnego_gss_init_sec_context().
*/
/*
 * First call to spnego_gss_init_sec_context(): create the SPNEGO
 * context, determine the negotiable mech set (optimistically selecting
 * its first element), and DER-encode the mech list for the
 * negTokenInit.  On success *ctx refers to the new SPNEGO context and
 * *tokflag requests the initial token.
 */
static OM_uint32
init_ctx_new(OM_uint32 *minor_status,
	     spnego_gss_cred_id_t spcred,
	     gss_ctx_id_t *ctx,
	     send_token_flag *tokflag)
{
	OM_uint32 ret;
	spnego_gss_ctx_id_t sc = NULL;

	sc = create_spnego_ctx();
	if (sc == NULL)
		return GSS_S_FAILURE;

	/* determine negotiation mech set */
	ret = get_negotiable_mechs(minor_status, spcred, GSS_C_INITIATE,
				   &sc->mech_set);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;

	/* Set an initial internal mech to make the first context token. */
	sc->internal_mech = &sc->mech_set->elements[0];

	if (put_mech_set(sc->mech_set, &sc->DER_mechTypes) < 0) {
		ret = GSS_S_FAILURE;
		goto cleanup;
	}
	/*
	 * The actual context is not yet determined, set the output
	 * context handle to refer to the spnego context itself.
	 */
	sc->ctx_handle = GSS_C_NO_CONTEXT;
	*ctx = (gss_ctx_id_t)sc;
	sc = NULL;			/* ownership transferred to *ctx */
	*tokflag = INIT_TOKEN_SEND;
	ret = GSS_S_CONTINUE_NEEDED;

cleanup:
	release_spnego_ctx(&sc);
	return ret;
}
/*
* Called by second and later calls to spnego_gss_init_sec_context()
* to decode reply and update state.
*/
/*
 * Second and later calls to spnego_gss_init_sec_context(): decode the
 * acceptor's negTokenResp and update the negotiation state.  Before
 * mech negotiation settles (nego_done == 0) the reply is handed to
 * init_ctx_nego(); afterwards we only sanity-check the presence or
 * absence of a mech token and decide whether to continue.
 */
static OM_uint32
init_ctx_cont(OM_uint32 *minor_status, gss_ctx_id_t *ctx, gss_buffer_t buf,
	      gss_buffer_t *responseToken, gss_buffer_t *mechListMIC,
	      OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret, tmpmin, acc_negState;
	unsigned char *ptr;
	spnego_gss_ctx_id_t sc;
	gss_OID supportedMech = GSS_C_NO_OID;

	sc = (spnego_gss_ctx_id_t)*ctx;
	/* Pessimistic defaults; overwritten on the non-error paths below. */
	*negState = REJECT;
	*tokflag = ERROR_TOKEN_SEND;

	ptr = buf->value;
	ret = get_negTokenResp(minor_status, ptr, buf->length,
			       &acc_negState, &supportedMech,
			       responseToken, mechListMIC);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;
	if (acc_negState == ACCEPT_DEFECTIVE_TOKEN &&
	    supportedMech == GSS_C_NO_OID &&
	    *responseToken == GSS_C_NO_BUFFER &&
	    *mechListMIC == GSS_C_NO_BUFFER) {
		/*
		 * Reject "empty" token.  Bail out immediately: previously
		 * this status fell through and could be silently
		 * overwritten by the state checks below, allowing an empty
		 * reply after mech completion to be treated as success.
		 */
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto cleanup;
	}
	if (acc_negState == REJECT) {
		*minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
		map_errcode(minor_status);
		*tokflag = NO_TOKEN_SEND;
		ret = GSS_S_FAILURE;
		goto cleanup;
	}
	/*
	 * nego_done is false for the first call to init_ctx_cont()
	 */
	if (!sc->nego_done) {
		ret = init_ctx_nego(minor_status, sc,
				    acc_negState,
				    supportedMech, responseToken,
				    mechListMIC,
				    negState, tokflag);
	} else if ((!sc->mech_complete && *responseToken == GSS_C_NO_BUFFER) ||
		   (sc->mech_complete && *responseToken != GSS_C_NO_BUFFER)) {
		/* Missing or spurious token from acceptor. */
		ret = GSS_S_DEFECTIVE_TOKEN;
	} else if (!sc->mech_complete ||
		   (sc->mic_reqd &&
		    (sc->ctx_flags & GSS_C_INTEG_FLAG))) {
		/* Not obviously done; we may decide we're done later in
		 * init_ctx_call_init or handle_mic. */
		*negState = ACCEPT_INCOMPLETE;
		*tokflag = CONT_TOKEN_SEND;
		ret = GSS_S_CONTINUE_NEEDED;
	} else {
		/* mech finished on last pass and no MIC required, so done. */
		*negState = ACCEPT_COMPLETE;
		*tokflag = NO_TOKEN_SEND;
		ret = GSS_S_COMPLETE;
	}
cleanup:
	if (supportedMech != GSS_C_NO_OID)
		generic_gss_release_oid(&tmpmin, &supportedMech);
	return ret;
}
/*
 * Consistency checking and mechanism negotiation handling for second
 * call of spnego_gss_init_sec_context().  Call init_ctx_reselect() to
 * update internal state if acceptor has counter-proposed.
 *
 * Outputs *negState/*tokflag for the caller's reply token and returns a
 * GSS major status; GSS_S_CONTINUE_NEEDED means another round trip is
 * expected.
 */
static OM_uint32
init_ctx_nego(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc,
	      OM_uint32 acc_negState, gss_OID supportedMech,
	      gss_buffer_t *responseToken, gss_buffer_t *mechListMIC,
	      OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret;

	/* Pessimistic defaults until the token proves acceptable. */
	*negState = REJECT;
	*tokflag = ERROR_TOKEN_SEND;
	ret = GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Both supportedMech and negState must be present in first
	 * acceptor token.
	 */
	if (supportedMech == GSS_C_NO_OID) {
		*minor_status = ERR_SPNEGO_NO_MECH_FROM_ACCEPTOR;
		map_errcode(minor_status);
		return GSS_S_DEFECTIVE_TOKEN;
	}
	if (acc_negState == ACCEPT_DEFECTIVE_TOKEN) {
		*minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
		map_errcode(minor_status);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	/*
	 * If the mechanism we sent is not the mechanism returned from
	 * the server, we need to handle the server's counter
	 * proposal.  There is a bug in SAMBA servers that always send
	 * the old Kerberos mech OID, even though we sent the new one.
	 * So we will treat all the Kerberos mech OIDS as the same.
	 */
	if (!(is_kerb_mech(supportedMech) &&
	      is_kerb_mech(sc->internal_mech)) &&
	    !g_OID_equal(supportedMech, sc->internal_mech)) {
		/* Acceptor counter-proposed a different mechanism. */
		ret = init_ctx_reselect(minor_status, sc,
					acc_negState, supportedMech,
					responseToken, mechListMIC,
					negState, tokflag);
	} else if (*responseToken == GSS_C_NO_BUFFER) {
		if (sc->mech_complete) {
			/*
			 * Mech completed on first call to its
			 * init_sec_context().  Acceptor sends no mech
			 * token.
			 */
			*negState = ACCEPT_COMPLETE;
			*tokflag = NO_TOKEN_SEND;
			ret = GSS_S_COMPLETE;
		} else {
			/*
			 * Reject missing mech token when optimistic
			 * mech selected.
			 */
			*minor_status = ERR_SPNEGO_NO_TOKEN_FROM_ACCEPTOR;
			map_errcode(minor_status);
			ret = GSS_S_DEFECTIVE_TOKEN;
		}
	} else if ((*responseToken)->length == 0 && sc->mech_complete) {
		/* Handle old IIS servers returning empty token instead of
		 * null tokens in the non-mutual auth case. */
		*negState = ACCEPT_COMPLETE;
		*tokflag = NO_TOKEN_SEND;
		ret = GSS_S_COMPLETE;
	} else if (sc->mech_complete) {
		/* Reject spurious mech token. */
		ret = GSS_S_DEFECTIVE_TOKEN;
	} else {
		/* Mechanism still in progress; keep exchanging tokens. */
		*negState = ACCEPT_INCOMPLETE;
		*tokflag = CONT_TOKEN_SEND;
		ret = GSS_S_CONTINUE_NEEDED;
	}
	/* Negotiation is settled after this first response, whatever the
	 * outcome; later calls go straight to the mech token paths. */
	sc->nego_done = 1;
	return ret;
}
/*
 * Handle acceptor's counter-proposal of an alternative mechanism.
 *
 * Tears down the optimistic mechanism's partially-established context,
 * switches sc->internal_mech to the counter-proposed OID (which must be
 * one we offered), and requires a MIC exchange for the new mechanism.
 */
static OM_uint32
init_ctx_reselect(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc,
		  OM_uint32 acc_negState, gss_OID supportedMech,
		  gss_buffer_t *responseToken, gss_buffer_t *mechListMIC,
		  OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 tmpmin;
	size_t i;

	/* Discard the context we started with the optimistic mech. */
	gss_delete_sec_context(&tmpmin, &sc->ctx_handle,
			       GSS_C_NO_BUFFER);

	/* Find supportedMech in sc->mech_set. */
	for (i = 0; i < sc->mech_set->count; i++) {
		if (g_OID_equal(supportedMech, &sc->mech_set->elements[i]))
			break;
	}
	if (i == sc->mech_set->count)
		return GSS_S_DEFECTIVE_TOKEN;
	/* Alias into mech_set; not separately allocated. */
	sc->internal_mech = &sc->mech_set->elements[i];

	/*
	 * Windows 2003 and earlier don't correctly send a
	 * negState of request-mic when counter-proposing a
	 * mechanism.  They probably don't handle mechListMICs
	 * properly either.
	 */
	if (acc_negState != REQUEST_MIC)
		return GSS_S_DEFECTIVE_TOKEN;

	/* Restart the mech exchange and insist on a mechlistMIC. */
	sc->mech_complete = 0;
	sc->mic_reqd = 1;
	*negState = REQUEST_MIC;
	*tokflag = CONT_TOKEN_SEND;
	return GSS_S_CONTINUE_NEEDED;
}
/*
 * Wrap call to mechanism gss_init_sec_context() and update state
 * accordingly.
 *
 * On a hard error from the very first (optimistic) mech, removes that
 * mech from the front of sc->mech_set, re-encodes DER_mechTypes, and
 * recurses to try the next candidate; the first mech's error is
 * reported if all candidates fail.
 */
static OM_uint32
init_ctx_call_init(OM_uint32 *minor_status,
		   spnego_gss_ctx_id_t sc,
		   spnego_gss_cred_id_t spcred,
		   gss_name_t target_name,
		   OM_uint32 req_flags,
		   OM_uint32 time_req,
		   gss_buffer_t mechtok_in,
		   gss_OID *actual_mech,
		   gss_buffer_t mechtok_out,
		   OM_uint32 *ret_flags,
		   OM_uint32 *time_rec,
		   OM_uint32 *negState,
		   send_token_flag *send_token)
{
	OM_uint32 ret, tmpret, tmpmin;
	gss_cred_id_t mcred;

	mcred = (spcred == NULL) ? GSS_C_NO_CREDENTIAL : spcred->mcred;
	/* GSS_C_INTEG_FLAG is always requested so a mechlistMIC can be
	 * produced if the negotiation ends up requiring one. */
	ret = gss_init_sec_context(minor_status,
				   mcred,
				   &sc->ctx_handle,
				   target_name,
				   sc->internal_mech,
				   (req_flags | GSS_C_INTEG_FLAG),
				   time_req,
				   GSS_C_NO_CHANNEL_BINDINGS,
				   mechtok_in,
				   &sc->actual_mech,
				   mechtok_out,
				   &sc->ctx_flags,
				   time_rec);
	if (ret == GSS_S_COMPLETE) {
		sc->mech_complete = 1;
		if (ret_flags != NULL)
			*ret_flags = sc->ctx_flags;
		/*
		 * Microsoft SPNEGO implementations expect an even number of
		 * token exchanges.  So if we're sending a final token, ask for
		 * a zero-length token back from the server.  Also ask for a
		 * token back if this is the first token or if a MIC exchange
		 * is required.
		 */
		if (*send_token == CONT_TOKEN_SEND &&
		    mechtok_out->length == 0 &&
		    (!sc->mic_reqd ||
		     !(sc->ctx_flags & GSS_C_INTEG_FLAG))) {
			/* The exchange is complete. */
			*negState = ACCEPT_COMPLETE;
			ret = GSS_S_COMPLETE;
			*send_token = NO_TOKEN_SEND;
		} else {
			/* Ask for one more hop. */
			*negState = ACCEPT_INCOMPLETE;
			ret = GSS_S_CONTINUE_NEEDED;
		}
		return ret;
	}

	if (ret == GSS_S_CONTINUE_NEEDED)
		return ret;

	/* A hard error after the first token is not recoverable. */
	if (*send_token != INIT_TOKEN_SEND) {
		*send_token = ERROR_TOKEN_SEND;
		*negState = REJECT;
		return ret;
	}

	/*
	 * Since this is the first token, we can fall back to later mechanisms
	 * in the list.  Since the mechanism list is expected to be short, we
	 * can do this with recursion.  If all mechanisms produce errors, the
	 * caller should get the error from the first mech in the list.
	 */
	/* Drop the failed mech from the front of the set. */
	gssalloc_free(sc->mech_set->elements->elements);
	memmove(sc->mech_set->elements, sc->mech_set->elements + 1,
		--sc->mech_set->count * sizeof(*sc->mech_set->elements));
	if (sc->mech_set->count == 0)
		goto fail;
	/* Re-encode the shortened mech list for the tokenInit message. */
	gss_release_buffer(&tmpmin, &sc->DER_mechTypes);
	if (put_mech_set(sc->mech_set, &sc->DER_mechTypes) < 0)
		goto fail;
	tmpret = init_ctx_call_init(&tmpmin, sc, spcred, target_name,
				    req_flags, time_req, mechtok_in,
				    actual_mech, mechtok_out, ret_flags,
				    time_rec, negState, send_token);
	if (HARD_ERROR(tmpret))
		goto fail;
	*minor_status = tmpmin;
	return tmpret;

fail:
	/* Don't output token on error from first call. */
	*send_token = NO_TOKEN_SEND;
	*negState = REJECT;
	/* Note: ret still holds the first mech's error status. */
	return ret;
}
/*ARGSUSED*/
/*
 * SPNEGO initiator entry point (RFC 4178 negotiation wrapped around the
 * selected mechanism's gss_init_sec_context).  Standard GSS-API
 * init_sec_context contract: returns GSS_S_CONTINUE_NEEDED while more
 * tokens must be exchanged, GSS_S_COMPLETE on success (at which point
 * *context_handle is switched to the inner mech's context), or an error
 * (at which point the SPNEGO context is destroyed).
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_init_sec_context(
			OM_uint32 *minor_status,
			gss_cred_id_t claimant_cred_handle,
			gss_ctx_id_t *context_handle,
			gss_name_t target_name,
			gss_OID mech_type,
			OM_uint32 req_flags,
			OM_uint32 time_req,
			gss_channel_bindings_t input_chan_bindings,
			gss_buffer_t input_token,
			gss_OID *actual_mech,
			gss_buffer_t output_token,
			OM_uint32 *ret_flags,
			OM_uint32 *time_rec)
{
	send_token_flag send_token = NO_TOKEN_SEND;
	OM_uint32 tmpmin, ret, negState;
	/* mechtok_in/mechListMIC_in are parsed out of input_token by
	 * init_ctx_cont() and owned (and freed) by this function. */
	gss_buffer_t mechtok_in, mechListMIC_in, mechListMIC_out;
	gss_buffer_desc mechtok_out = GSS_C_EMPTY_BUFFER;
	spnego_gss_cred_id_t spcred = NULL;
	spnego_gss_ctx_id_t spnego_ctx = NULL;

	dsyslog("Entering init_sec_context\n");

	mechtok_in = mechListMIC_out = mechListMIC_in = GSS_C_NO_BUFFER;
	negState = REJECT;

	/*
	 * This function works in three steps:
	 *
	 *   1. Perform mechanism negotiation.
	 *   2. Invoke the negotiated or optimistic mech's gss_init_sec_context
	 *      function and examine the results.
	 *   3. Process or generate MICs if necessary.
	 *
	 * The three steps share responsibility for determining when the
	 * exchange is complete.  If the selected mech completed in a previous
	 * call and no MIC exchange is expected, then step 1 will decide.  If
	 * the selected mech completes in this call and no MIC exchange is
	 * expected, then step 2 will decide.  If a MIC exchange is expected,
	 * then step 3 will decide.  If an error occurs in any step, the
	 * exchange will be aborted, possibly with an error token.
	 *
	 * negState determines the state of the negotiation, and is
	 * communicated to the acceptor if a continuing token is sent.
	 * send_token is used to indicate what type of token, if any, should be
	 * generated.
	 */

	/* Validate arguments. */
	if (minor_status != NULL)
		*minor_status = 0;
	if (output_token != GSS_C_NO_BUFFER) {
		output_token->length = 0;
		output_token->value = NULL;
	}
	if (minor_status == NULL ||
	    output_token == GSS_C_NO_BUFFER ||
	    context_handle == NULL)
		return GSS_S_CALL_INACCESSIBLE_WRITE;

	if (actual_mech != NULL)
		*actual_mech = GSS_C_NO_OID;

	/* Step 1: perform mechanism negotiation. */
	spcred = (spnego_gss_cred_id_t)claimant_cred_handle;
	if (*context_handle == GSS_C_NO_CONTEXT) {
		/* First call: build the context and the initial token. */
		ret = init_ctx_new(minor_status, spcred,
				   context_handle, &send_token);
		if (ret != GSS_S_CONTINUE_NEEDED) {
			goto cleanup;
		}
	} else {
		/* Subsequent call: parse the acceptor's response. */
		ret = init_ctx_cont(minor_status, context_handle,
				    input_token, &mechtok_in,
				    &mechListMIC_in, &negState, &send_token);
		if (HARD_ERROR(ret)) {
			goto cleanup;
		}
	}

	/* Step 2: invoke the selected or optimistic mechanism's
	 * gss_init_sec_context function, if it didn't complete previously. */
	spnego_ctx = (spnego_gss_ctx_id_t)*context_handle;
	if (!spnego_ctx->mech_complete) {
		ret = init_ctx_call_init(
			minor_status, spnego_ctx, spcred,
			target_name, req_flags,
			time_req, mechtok_in,
			actual_mech, &mechtok_out,
			ret_flags, time_rec,
			&negState, &send_token);
	}

	/* Step 3: process or generate the MIC, if the negotiated mech is
	 * complete and supports MICs. */
	if (!HARD_ERROR(ret) && spnego_ctx->mech_complete &&
	    (spnego_ctx->ctx_flags & GSS_C_INTEG_FLAG)) {
		ret = handle_mic(minor_status,
				 mechListMIC_in,
				 (mechtok_out.length != 0),
				 spnego_ctx, &mechListMIC_out,
				 &negState, &send_token);
	}

cleanup:
	/* Emit whichever outer SPNEGO token the steps above requested. */
	if (send_token == INIT_TOKEN_SEND) {
		if (make_spnego_tokenInit_msg(spnego_ctx,
					      0,
					      mechListMIC_out,
					      req_flags,
					      &mechtok_out, send_token,
					      output_token) < 0) {
			ret = GSS_S_FAILURE;
		}
	} else if (send_token != NO_TOKEN_SEND) {
		if (make_spnego_tokenTarg_msg(negState, GSS_C_NO_OID,
					      &mechtok_out, mechListMIC_out,
					      send_token,
					      output_token) < 0) {
			ret = GSS_S_FAILURE;
		}
	}
	gss_release_buffer(&tmpmin, &mechtok_out);
	if (ret == GSS_S_COMPLETE) {
		/*
		 * Now, switch the output context to refer to the
		 * negotiated mechanism's context.
		 */
		*context_handle = (gss_ctx_id_t)spnego_ctx->ctx_handle;
		if (actual_mech != NULL)
			*actual_mech = spnego_ctx->actual_mech;
		if (ret_flags != NULL)
			*ret_flags = spnego_ctx->ctx_flags;
		release_spnego_ctx(&spnego_ctx);
	} else if (ret != GSS_S_CONTINUE_NEEDED) {
		/* Hard error: destroy both contexts. */
		if (spnego_ctx != NULL) {
			gss_delete_sec_context(&tmpmin,
					       &spnego_ctx->ctx_handle,
					       GSS_C_NO_BUFFER);
			release_spnego_ctx(&spnego_ctx);
		}
		*context_handle = GSS_C_NO_CONTEXT;
	}
	/* Buffers parsed from input_token were heap-allocated for us. */
	if (mechtok_in != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mechtok_in);
		free(mechtok_in);
	}
	if (mechListMIC_in != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mechListMIC_in);
		free(mechListMIC_in);
	}
	if (mechListMIC_out != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mechListMIC_out);
		free(mechListMIC_out);
	}
	return ret;
} /* init_sec_context */
/* We don't want to import KRB5 headers here */
/* 1.2.840.113554.1.2.2: the standard Kerberos 5 mechanism OID. */
static const gss_OID_desc gss_mech_krb5_oid =
	{ 9, "\052\206\110\206\367\022\001\002\002" };
/* 1.2.840.48018.1.2.2: the erroneous krb5 OID (fourth encoded byte \202
 * instead of \206) used by legacy Microsoft implementations. */
static const gss_OID_desc gss_mech_krb5_wrong_oid =
	{ 9, "\052\206\110\202\367\022\001\002\002" };
/*
 * DER-encode input_token as a SEQUENCE into *buf_out, advancing the
 * output cursor past the bytes written.  A zero-length token is
 * silently skipped (success, nothing emitted); a token larger than
 * buflen is refused with -1.
 */
static int
put_neg_hints(unsigned char **buf_out, gss_buffer_t input_token,
	      unsigned int buflen)
{
	int err;

	/* Nothing to send for an empty token. */
	if (input_token->length == 0)
		return (0);
	/* Refuse to overrun the caller's buffer. */
	if (input_token->length > buflen)
		return (-1);

	*(*buf_out)++ = SEQUENCE;
	err = gssint_put_der_length(input_token->length, buf_out,
				    input_token->length);
	if (err)
		return (err);
	TWRITE_STR(*buf_out, input_token->value, input_token->length);
	return (0);
}
/*
 * NegHints ::= SEQUENCE {
 *	  hintName [0] GeneralString OPTIONAL,
 *	  hintAddress [1] OCTET STRING OPTIONAL
 * }
 */

#define HOST_PREFIX	"host@"
#define HOST_PREFIX_LEN	(sizeof(HOST_PREFIX) - 1)

/*
 * Build the DER-encoded hintName element of a NegHints structure.  The
 * name is taken from spcred's credential if one is supplied, otherwise
 * a host-based name "host@<local hostname>" is imported.  On success,
 * *outbuf is a malloc'd gss_buffer_t (value allocated with
 * gssalloc_malloc) that the caller must release and free; on failure
 * *outbuf is GSS_C_NO_BUFFER and a GSS error status is returned.
 */
static int
make_NegHints(OM_uint32 *minor_status,
	      spnego_gss_cred_id_t spcred, gss_buffer_t *outbuf)
{
	gss_buffer_desc hintNameBuf;
	gss_name_t hintName = GSS_C_NO_NAME;
	gss_name_t hintKerberosName;
	gss_OID hintNameType;
	OM_uint32 major_status;
	OM_uint32 minor;
	unsigned int tlen = 0;
	unsigned int hintNameSize = 0;
	unsigned char *ptr;
	unsigned char *t;

	*outbuf = GSS_C_NO_BUFFER;

	/* Prefer the name bound to the supplied acceptor credential. */
	if (spcred != NULL) {
		major_status = gss_inquire_cred(minor_status,
						spcred->mcred,
						&hintName,
						NULL,
						NULL,
						NULL);
		if (major_status != GSS_S_COMPLETE)
			return (major_status);
	}

	if (hintName == GSS_C_NO_NAME) {
		krb5_error_code code;
		krb5int_access kaccess;
		char hostname[HOST_PREFIX_LEN + MAXHOSTNAMELEN + 1] = HOST_PREFIX;

		code = krb5int_accessor(&kaccess, KRB5INT_ACCESS_VERSION);
		if (code != 0) {
			*minor_status = code;
			return (GSS_S_FAILURE);
		}

		/* this breaks mutual authentication but Samba relies on it */
		code = (*kaccess.clean_hostname)(NULL, NULL,
						 &hostname[HOST_PREFIX_LEN],
						 MAXHOSTNAMELEN);
		if (code != 0) {
			*minor_status = code;
			return (GSS_S_FAILURE);
		}

		hintNameBuf.value = hostname;
		hintNameBuf.length = strlen(hostname);

		major_status = gss_import_name(minor_status,
					       &hintNameBuf,
					       GSS_C_NT_HOSTBASED_SERVICE,
					       &hintName);
		if (major_status != GSS_S_COMPLETE) {
			return (major_status);
		}
	}

	hintNameBuf.value = NULL;
	hintNameBuf.length = 0;

	major_status = gss_canonicalize_name(minor_status,
					     hintName,
					     (gss_OID)&gss_mech_krb5_oid,
					     &hintKerberosName);
	if (major_status != GSS_S_COMPLETE) {
		gss_release_name(&minor, &hintName);
		return (major_status);
	}
	gss_release_name(&minor, &hintName);

	major_status = gss_display_name(minor_status,
					hintKerberosName,
					&hintNameBuf,
					&hintNameType);
	if (major_status != GSS_S_COMPLETE) {
		/*
		 * Release the canonicalized name.  (The previous code
		 * re-released hintName here, which gss_release_name had
		 * already reset to GSS_C_NO_NAME above, leaking
		 * hintKerberosName on this path.)
		 */
		gss_release_name(&minor, &hintKerberosName);
		return (major_status);
	}
	gss_release_name(&minor, &hintKerberosName);

	/*
	 * Now encode the name hint into a NegHints ASN.1 type
	 */
	major_status = GSS_S_FAILURE;

	/* Length of DER encoded GeneralString */
	tlen = 1 + gssint_der_length_size(hintNameBuf.length) +
		hintNameBuf.length;
	hintNameSize = tlen;

	/* Length of DER encoded hintName */
	tlen += 1 + gssint_der_length_size(hintNameSize);

	t = gssalloc_malloc(tlen);
	if (t == NULL) {
		*minor_status = ENOMEM;
		goto errout;
	}

	ptr = t;

	*ptr++ = CONTEXT | 0x00; /* hintName identifier */
	if (gssint_put_der_length(hintNameSize,
				  &ptr, tlen - (int)(ptr-t)))
		goto errout;

	*ptr++ = GENERAL_STRING;
	if (gssint_put_der_length(hintNameBuf.length,
				  &ptr, tlen - (int)(ptr-t)))
		goto errout;

	memcpy(ptr, hintNameBuf.value, hintNameBuf.length);
	ptr += hintNameBuf.length;

	*outbuf = (gss_buffer_t)malloc(sizeof(gss_buffer_desc));
	if (*outbuf == NULL) {
		*minor_status = ENOMEM;
		goto errout;
	}
	(*outbuf)->value = (void *)t;
	(*outbuf)->length = ptr - t;

	t = NULL; /* don't free */

	*minor_status = 0;
	major_status = GSS_S_COMPLETE;

errout:
	/* t was allocated with gssalloc_malloc, so free it with the
	 * matching gssalloc_free (plain free() mismatched the allocator
	 * on platforms where the two differ). */
	if (t != NULL)
		gssalloc_free(t);
	gss_release_buffer(&minor, &hintNameBuf);
	return (major_status);
}
/*
 * Support the Microsoft NegHints extension to SPNEGO for compatibility with
 * some versions of Samba.  See:
 *   http://msdn.microsoft.com/en-us/library/cc247039(PROT.10).aspx
 *
 * Handles an empty initiator token by creating a fresh SPNEGO context
 * (with no mechanism selected yet) and arranging for a NegTokenInit2
 * reply carrying our mech list and a NegHints hint name in *mechListMIC.
 */
static OM_uint32
acc_ctx_hints(OM_uint32 *minor_status,
	      gss_ctx_id_t *ctx,
	      spnego_gss_cred_id_t spcred,
	      gss_buffer_t *mechListMIC,
	      OM_uint32 *negState,
	      send_token_flag *return_token)
{
	OM_uint32 tmpmin, ret;
	gss_OID_set supported_mechSet;
	spnego_gss_ctx_id_t sc = NULL;

	*mechListMIC = GSS_C_NO_BUFFER;
	supported_mechSet = GSS_C_NO_OID_SET;
	*return_token = NO_TOKEN_SEND;
	*negState = REJECT;
	*minor_status = 0;

	/* A hint request must be the first token received. */
	if (*ctx != GSS_C_NO_CONTEXT)
		return GSS_S_DEFECTIVE_TOKEN;

	ret = get_negotiable_mechs(minor_status, spcred, GSS_C_ACCEPT,
				   &supported_mechSet);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;

	/* Caller receives ownership of *mechListMIC (the NegHints blob). */
	ret = make_NegHints(minor_status, spcred, mechListMIC);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;

	sc = create_spnego_ctx();
	if (sc == NULL) {
		ret = GSS_S_FAILURE;
		goto cleanup;
	}
	if (put_mech_set(supported_mechSet, &sc->DER_mechTypes) < 0) {
		ret = GSS_S_FAILURE;
		goto cleanup;
	}
	/* No mechanism negotiated yet; acc_ctx_new() will pick one when a
	 * real NegTokenInit arrives. */
	sc->internal_mech = GSS_C_NO_OID;
	*negState = ACCEPT_INCOMPLETE;
	*return_token = INIT_TOKEN_SEND;
	sc->firstpass = 1;
	/* Ownership of sc transfers to the caller's context handle. */
	*ctx = (gss_ctx_id_t)sc;
	sc = NULL;
	ret = GSS_S_COMPLETE;

cleanup:
	release_spnego_ctx(&sc);
	gss_release_oid_set(&tmpmin, &supported_mechSet);
	return ret;
}
/*
 * Set negState to REJECT if the token is defective, else
 * ACCEPT_INCOMPLETE or REQUEST_MIC, depending on whether initiator's
 * preferred mechanism is supported.
 *
 * Parses the initiator's NegTokenInit, negotiates a mechanism against
 * our supported set, and populates (or reuses, after a NegHints
 * exchange) the SPNEGO context.  *mechToken/*mechListMIC are parsed out
 * of buf and become the caller's to free.
 */
static OM_uint32
acc_ctx_new(OM_uint32 *minor_status,
	    gss_buffer_t buf,
	    gss_ctx_id_t *ctx,
	    spnego_gss_cred_id_t spcred,
	    gss_buffer_t *mechToken,
	    gss_buffer_t *mechListMIC,
	    OM_uint32 *negState,
	    send_token_flag *return_token)
{
	OM_uint32 tmpmin, ret, req_flags;
	gss_OID_set supported_mechSet, mechTypes;
	gss_buffer_desc der_mechTypes;
	gss_OID mech_wanted;
	spnego_gss_ctx_id_t sc = NULL;

	ret = GSS_S_DEFECTIVE_TOKEN;
	der_mechTypes.length = 0;
	der_mechTypes.value = NULL;
	*mechToken = *mechListMIC = GSS_C_NO_BUFFER;
	supported_mechSet = mechTypes = GSS_C_NO_OID_SET;
	*return_token = ERROR_TOKEN_SEND;
	*negState = REJECT;
	*minor_status = 0;

	ret = get_negTokenInit(minor_status, buf, &der_mechTypes,
			       &mechTypes, &req_flags,
			       mechToken, mechListMIC);
	if (ret != GSS_S_COMPLETE) {
		goto cleanup;
	}
	ret = get_negotiable_mechs(minor_status, spcred, GSS_C_ACCEPT,
				   &supported_mechSet);
	if (ret != GSS_S_COMPLETE) {
		*return_token = NO_TOKEN_SEND;
		goto cleanup;
	}
	/*
	 * Select the best match between the list of mechs
	 * that the initiator requested and the list that
	 * the acceptor will support.
	 */
	mech_wanted = negotiate_mech(supported_mechSet, mechTypes, negState);
	if (*negState == REJECT) {
		ret = GSS_S_BAD_MECH;
		goto cleanup;
	}
	sc = (spnego_gss_ctx_id_t)*ctx;
	if (sc != NULL) {
		/* Context created earlier by acc_ctx_hints(); replace its
		 * DER mech list with the one from this real NegTokenInit. */
		gss_release_buffer(&tmpmin, &sc->DER_mechTypes);
		assert(mech_wanted != GSS_C_NO_OID);
	} else
		sc = create_spnego_ctx();
	if (sc == NULL) {
		ret = GSS_S_FAILURE;
		*return_token = NO_TOKEN_SEND;
		goto cleanup;
	}
	/* Transfer ownership of the parsed mech set and its DER encoding
	 * into the context (cleared locally so cleanup won't free them). */
	sc->mech_set = mechTypes;
	mechTypes = GSS_C_NO_OID_SET;
	sc->internal_mech = mech_wanted;
	sc->DER_mechTypes = der_mechTypes;
	der_mechTypes.length = 0;
	der_mechTypes.value = NULL;

	if (*negState == REQUEST_MIC)
		sc->mic_reqd = 1;

	*return_token = INIT_TOKEN_SEND;
	sc->firstpass = 1;
	*ctx = (gss_ctx_id_t)sc;
	ret = GSS_S_COMPLETE;

cleanup:
	gss_release_oid_set(&tmpmin, &mechTypes);
	gss_release_oid_set(&tmpmin, &supported_mechSet);
	if (der_mechTypes.length != 0)
		gss_release_buffer(&tmpmin, &der_mechTypes);

	return ret;
}
/*
 * Parse a continuation (NegTokenResp) token from the initiator on the
 * acceptor side.  Extracts *responseToken and *mechListMIC (caller
 * frees); rejects tokens that carry neither, and tokens that carry a
 * supportedMech field (only the acceptor may send one).
 */
static OM_uint32
acc_ctx_cont(OM_uint32 *minstat,
	     gss_buffer_t buf,
	     gss_ctx_id_t *ctx,
	     gss_buffer_t *responseToken,
	     gss_buffer_t *mechListMIC,
	     OM_uint32 *negState,
	     send_token_flag *return_token)
{
	OM_uint32 ret, tmpmin;
	gss_OID supportedMech;
	spnego_gss_ctx_id_t sc;
	unsigned int len;
	unsigned char *ptr, *bufstart;

	sc = (spnego_gss_ctx_id_t)*ctx;
	ret = GSS_S_DEFECTIVE_TOKEN;
	*negState = REJECT;
	*minstat = 0;
	supportedMech = GSS_C_NO_OID;
	*return_token = ERROR_TOKEN_SEND;
	*responseToken = *mechListMIC = GSS_C_NO_BUFFER;

	ptr = bufstart = buf->value;
	/* Bytes of buf remaining beyond the current parse position. */
#define REMAIN (buf->length - (ptr - bufstart))
	if (REMAIN > INT_MAX)
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Attempt to work with old Sun SPNEGO.
	 */
	if (*ptr == HEADER_ID) {
		/* Strip the optional GSS framing some implementations add. */
		ret = g_verify_token_header(gss_mech_spnego,
					    &len, &ptr, 0, REMAIN);
		if (ret) {
			*minstat = ret;
			return GSS_S_DEFECTIVE_TOKEN;
		}
	}
	if (*ptr != (CONTEXT | 0x01)) {
		return GSS_S_DEFECTIVE_TOKEN;
	}
	ret = get_negTokenResp(minstat, ptr, REMAIN,
			       negState, &supportedMech,
			       responseToken, mechListMIC);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;

	/* Reject "empty" continuation tokens with neither a response token
	 * nor a MIC; there is nothing to act on. */
	if (*responseToken == GSS_C_NO_BUFFER &&
	    *mechListMIC == GSS_C_NO_BUFFER) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto cleanup;
	}
	/* Only the acceptor sends supportedMech; an initiator must not. */
	if (supportedMech != GSS_C_NO_OID) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto cleanup;
	}
	sc->firstpass = 0;
	*negState = ACCEPT_INCOMPLETE;
	*return_token = CONT_TOKEN_SEND;
cleanup:
	if (supportedMech != GSS_C_NO_OID) {
		generic_gss_release_oid(&tmpmin, &supportedMech);
	}
	return ret;
#undef REMAIN
}
/*
 * Verify that mech OID is either exactly the same as the negotiated
 * mech OID, or is a mech OID supported by the negotiated mech.  MS
 * implementations can list a most preferred mech using an incorrect
 * krb5 OID while emitting a krb5 initiator mech token having the
 * correct krb5 mech OID.
 */
static OM_uint32
acc_ctx_vfy_oid(OM_uint32 *minor_status,
		spnego_gss_ctx_id_t sc, gss_OID mechoid,
		OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 major, tmpmin;
	gss_mechanism mech;
	gss_OID_set supported = GSS_C_NO_OID_SET;
	int is_member = 0;

	/* An exact match needs no further checking. */
	if (g_OID_equal(sc->internal_mech, mechoid))
		return GSS_S_COMPLETE;

	mech = gssint_get_mechanism(sc->internal_mech);
	if (mech == NULL || mech->gss_indicate_mechs == NULL) {
		*minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
		map_errcode(minor_status);
		*negState = REJECT;
		*tokflag = ERROR_TOKEN_SEND;
		return GSS_S_BAD_MECH;
	}

	/* Ask the negotiated mech which OIDs it answers to. */
	major = mech->gss_indicate_mechs(minor_status, &supported);
	if (major != GSS_S_COMPLETE) {
		*tokflag = NO_TOKEN_SEND;
		map_error(minor_status, mech);
	} else {
		major = gss_test_oid_set_member(minor_status, mechoid,
						supported, &is_member);
		if (major == GSS_S_COMPLETE && !is_member) {
			*minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
			map_errcode(minor_status);
			*negState = REJECT;
			*tokflag = ERROR_TOKEN_SEND;
			major = GSS_S_BAD_MECH;
		}
	}

	gss_release_oid_set(&tmpmin, &supported);
	return major;
}
#ifndef LEAN_CLIENT
/*
 * Wrap call to gss_accept_sec_context() and update state
 * accordingly.
 *
 * On the first call (no inner context yet), first verifies that the
 * OID in mechtok_in matches, or is acceptable to, the negotiated mech.
 * Decides completion: if the inner mech finishes and no MIC exchange
 * is required, the whole SPNEGO exchange is complete.
 */
static OM_uint32
acc_ctx_call_acc(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc,
		 spnego_gss_cred_id_t spcred, gss_buffer_t mechtok_in,
		 gss_OID *mech_type, gss_buffer_t mechtok_out,
		 OM_uint32 *ret_flags, OM_uint32 *time_rec,
		 gss_cred_id_t *delegated_cred_handle,
		 OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret;
	gss_OID_desc mechoid;
	gss_cred_id_t mcred;

	if (sc->ctx_handle == GSS_C_NO_CONTEXT) {
		/*
		 * mechoid is an alias; don't free it.
		 */
		ret = gssint_get_mech_type(&mechoid, mechtok_in);
		if (ret != GSS_S_COMPLETE) {
			*tokflag = NO_TOKEN_SEND;
			return ret;
		}
		ret = acc_ctx_vfy_oid(minor_status, sc, &mechoid,
				      negState, tokflag);
		if (ret != GSS_S_COMPLETE)
			return ret;
	}

	mcred = (spcred == NULL) ? GSS_C_NO_CREDENTIAL : spcred->mcred;
	ret = gss_accept_sec_context(minor_status,
				     &sc->ctx_handle,
				     mcred,
				     mechtok_in,
				     GSS_C_NO_CHANNEL_BINDINGS,
				     &sc->internal_name,
				     mech_type,
				     mechtok_out,
				     &sc->ctx_flags,
				     time_rec,
				     delegated_cred_handle);
	if (ret == GSS_S_COMPLETE) {
#ifdef MS_BUG_TEST
		/*
		 * Force MIC to be not required even if we previously
		 * requested a MIC.  (Test hook, compiled out by default.)
		 */
		char *envstr = getenv("MS_FORCE_NO_MIC");

		if (envstr != NULL && strcmp(envstr, "1") == 0 &&
		    !(sc->ctx_flags & GSS_C_MUTUAL_FLAG) &&
		    sc->mic_reqd) {
			sc->mic_reqd = 0;
		}
#endif
		sc->mech_complete = 1;
		if (ret_flags != NULL)
			*ret_flags = sc->ctx_flags;

		if (!sc->mic_reqd ||
		    !(sc->ctx_flags & GSS_C_INTEG_FLAG)) {
			/* No MIC exchange required, so we're done. */
			*negState = ACCEPT_COMPLETE;
			ret = GSS_S_COMPLETE;
		} else {
			/* handle_mic will decide if we're done. */
			ret = GSS_S_CONTINUE_NEEDED;
		}
	} else if (ret != GSS_S_CONTINUE_NEEDED) {
		/* Hard error from the inner mech; report it with a token. */
		*negState = REJECT;
		*tokflag = ERROR_TOKEN_SEND;
	}
	return ret;
}
/*ARGSUSED*/
/*
 * SPNEGO acceptor entry point (RFC 4178 negotiation wrapped around the
 * selected mechanism's gss_accept_sec_context).  Standard GSS-API
 * accept_sec_context contract; on completion *context_handle is
 * switched to the inner mech's context and the SPNEGO wrapper context
 * is released.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_accept_sec_context(
			    OM_uint32 *minor_status,
			    gss_ctx_id_t *context_handle,
			    gss_cred_id_t verifier_cred_handle,
			    gss_buffer_t input_token,
			    gss_channel_bindings_t input_chan_bindings,
			    gss_name_t *src_name,
			    gss_OID *mech_type,
			    gss_buffer_t output_token,
			    OM_uint32 *ret_flags,
			    OM_uint32 *time_rec,
			    gss_cred_id_t *delegated_cred_handle)
{
	OM_uint32 ret, tmpmin, negState;
	send_token_flag return_token;
	/* mechtok_in/mic_in are parsed from input_token and owned here. */
	gss_buffer_t mechtok_in, mic_in, mic_out;
	gss_buffer_desc mechtok_out = GSS_C_EMPTY_BUFFER;
	spnego_gss_ctx_id_t sc = NULL;
	spnego_gss_cred_id_t spcred = NULL;
	int sendTokenInit = 0, tmpret;

	mechtok_in = mic_in = mic_out = GSS_C_NO_BUFFER;

	/*
	 * This function works in three steps:
	 *
	 *   1. Perform mechanism negotiation.
	 *   2. Invoke the negotiated mech's gss_accept_sec_context function
	 *      and examine the results.
	 *   3. Process or generate MICs if necessary.
	 *
	 * Step one determines whether the negotiation requires a MIC exchange,
	 * while steps two and three share responsibility for determining when
	 * the exchange is complete.  If the selected mech completes in this
	 * call and no MIC exchange is expected, then step 2 will decide.  If a
	 * MIC exchange is expected, then step 3 will decide.  If an error
	 * occurs in any step, the exchange will be aborted, possibly with an
	 * error token.
	 *
	 * negState determines the state of the negotiation, and is
	 * communicated to the acceptor if a continuing token is sent.
	 * return_token is used to indicate what type of token, if any, should
	 * be generated.
	 */

	/* Validate arguments. */
	if (minor_status != NULL)
		*minor_status = 0;
	if (output_token != GSS_C_NO_BUFFER) {
		output_token->length = 0;
		output_token->value = NULL;
	}

	if (minor_status == NULL ||
	    output_token == GSS_C_NO_BUFFER ||
	    context_handle == NULL)
		return GSS_S_CALL_INACCESSIBLE_WRITE;

	if (input_token == GSS_C_NO_BUFFER)
		return GSS_S_CALL_INACCESSIBLE_READ;

	/* Step 1: Perform mechanism negotiation. */
	sc = (spnego_gss_ctx_id_t)*context_handle;
	spcred = (spnego_gss_cred_id_t)verifier_cred_handle;
	if (sc == NULL || sc->internal_mech == GSS_C_NO_OID) {
		/* Process an initial token or request for NegHints. */
		if (src_name != NULL)
			*src_name = GSS_C_NO_NAME;
		if (mech_type != NULL)
			*mech_type = GSS_C_NO_OID;
		if (time_rec != NULL)
			*time_rec = 0;
		if (ret_flags != NULL)
			*ret_flags = 0;
		if (delegated_cred_handle != NULL)
			*delegated_cred_handle = GSS_C_NO_CREDENTIAL;
		if (input_token->length == 0) {
			/* Empty token: Microsoft NegHints request. */
			ret = acc_ctx_hints(minor_status,
					    context_handle, spcred,
					    &mic_out,
					    &negState,
					    &return_token);
			if (ret != GSS_S_COMPLETE)
				goto cleanup;
			sendTokenInit = 1;
			ret = GSS_S_CONTINUE_NEEDED;
		} else {
			/* Can set negState to REQUEST_MIC */
			ret = acc_ctx_new(minor_status, input_token,
					  context_handle, spcred,
					  &mechtok_in, &mic_in,
					  &negState, &return_token);
			if (ret != GSS_S_COMPLETE)
				goto cleanup;
			ret = GSS_S_CONTINUE_NEEDED;
		}
	} else {
		/* Process a response token.  Can set negState to
		 * ACCEPT_INCOMPLETE. */
		ret = acc_ctx_cont(minor_status, input_token,
				   context_handle, &mechtok_in,
				   &mic_in, &negState, &return_token);
		if (ret != GSS_S_COMPLETE)
			goto cleanup;
		ret = GSS_S_CONTINUE_NEEDED;
	}

	/* Step 2: invoke the negotiated mechanism's gss_accept_sec_context
	 * function. */
	sc = (spnego_gss_ctx_id_t)*context_handle;
	/*
	 * Handle mechtok_in and mic_in only if they are
	 * present in input_token.  If neither is present, whether
	 * this is an error depends on whether this is the first
	 * round-trip.  RET is set to a default value according to
	 * whether it is the first round-trip.
	 */
	if (negState != REQUEST_MIC && mechtok_in != GSS_C_NO_BUFFER) {
		ret = acc_ctx_call_acc(minor_status, sc, spcred,
				       mechtok_in, mech_type, &mechtok_out,
				       ret_flags, time_rec,
				       delegated_cred_handle,
				       &negState, &return_token);
	}

	/* Step 3: process or generate the MIC, if the negotiated mech is
	 * complete and supports MICs. */
	if (!HARD_ERROR(ret) && sc->mech_complete &&
	    (sc->ctx_flags & GSS_C_INTEG_FLAG)) {
		ret = handle_mic(minor_status, mic_in,
				 (mechtok_out.length != 0),
				 sc, &mic_out,
				 &negState, &return_token);
	}
cleanup:
	/* Emit the NegTokenInit2 (hints) or NegTokenResp reply requested. */
	if (return_token == INIT_TOKEN_SEND && sendTokenInit) {
		assert(sc != NULL);
		tmpret = make_spnego_tokenInit_msg(sc, 1, mic_out, 0,
						   GSS_C_NO_BUFFER,
						   return_token, output_token);
		if (tmpret < 0)
			ret = GSS_S_FAILURE;
	} else if (return_token != NO_TOKEN_SEND &&
		   return_token != CHECK_MIC) {
		tmpret = make_spnego_tokenTarg_msg(negState,
						   sc ? sc->internal_mech :
						   GSS_C_NO_OID,
						   &mechtok_out, mic_out,
						   return_token,
						   output_token);
		if (tmpret < 0)
			ret = GSS_S_FAILURE;
	}
	if (ret == GSS_S_COMPLETE) {
		/* Hand the inner mech context (and name) to the caller. */
		*context_handle = (gss_ctx_id_t)sc->ctx_handle;
		if (sc->internal_name != GSS_C_NO_NAME &&
		    src_name != NULL) {
			*src_name = sc->internal_name;
			sc->internal_name = GSS_C_NO_NAME;
		}
		release_spnego_ctx(&sc);
	} else if (ret != GSS_S_CONTINUE_NEEDED) {
		/* Hard error: destroy both contexts. */
		if (sc != NULL) {
			gss_delete_sec_context(&tmpmin, &sc->ctx_handle,
					       GSS_C_NO_BUFFER);
			release_spnego_ctx(&sc);
		}
		*context_handle = GSS_C_NO_CONTEXT;
	}
	gss_release_buffer(&tmpmin, &mechtok_out);
	/* Buffers parsed from input_token were heap-allocated for us. */
	if (mechtok_in != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mechtok_in);
		free(mechtok_in);
	}
	if (mic_in != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mic_in);
		free(mic_in);
	}
	if (mic_out != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mic_out);
		free(mic_out);
	}
	return ret;
}
#endif /* LEAN_CLIENT */
/*ARGSUSED*/
/*
 * Translate a SPNEGO minor status code to a message.  Codes that are
 * not ours are forwarded to gss_display_status(); a thread-specific
 * flag guards against infinite recursion when the mechglue calls back
 * into this function.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_display_status(
		OM_uint32 *minor_status,
		OM_uint32 status_value,
		int status_type,
		gss_OID mech_type,
		OM_uint32 *message_context,
		gss_buffer_t status_string)
{
	OM_uint32 major = GSS_S_COMPLETE;
	int kerr;

	dsyslog("Entering display_status\n");

	*message_context = 0;
	if (status_value == ERR_SPNEGO_NO_MECHS_AVAILABLE) {
		*status_string = make_err_msg(_("SPNEGO cannot find "
						"mechanisms to negotiate"));
	} else if (status_value == ERR_SPNEGO_NO_CREDS_ACQUIRED) {
		*status_string = make_err_msg(_("SPNEGO failed to acquire "
						"creds"));
	} else if (status_value == ERR_SPNEGO_NO_MECH_FROM_ACCEPTOR) {
		*status_string = make_err_msg(_("SPNEGO acceptor did not "
						"select a mechanism"));
	} else if (status_value == ERR_SPNEGO_NEGOTIATION_FAILED) {
		*status_string = make_err_msg(_("SPNEGO failed to negotiate a "
						"mechanism"));
	} else if (status_value == ERR_SPNEGO_NO_TOKEN_FROM_ACCEPTOR) {
		*status_string = make_err_msg(_("SPNEGO acceptor did not "
						"return a valid token"));
	} else if (k5_getspecific(K5_KEY_GSS_SPNEGO_STATUS) != NULL) {
		/* We are already inside a recursive call; perhaps we
		 * returned a com_err code like ENOMEM. */
		*status_string = make_err_msg(error_message(status_value));
	} else {
		/* Set a non-null pointer value to mark that we are calling
		 * back into the mechglue; which pointer doesn't matter. */
		kerr = k5_setspecific(K5_KEY_GSS_SPNEGO_STATUS, &kerr);
		if (kerr != 0) {
			*minor_status = kerr;
			major = GSS_S_FAILURE;
		} else {
			major = gss_display_status(minor_status, status_value,
						   status_type, mech_type,
						   message_context,
						   status_string);
			/* Unlikely to fail; not much to do if it does. */
			(void)k5_setspecific(K5_KEY_GSS_SPNEGO_STATUS, NULL);
		}
	}

	dsyslog("Leaving display_status\n");
	return major;
}
/*ARGSUSED*/
/* Pass-through to the mechglue's gss_import_name(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_import_name(OM_uint32 *minor_status,
		       gss_buffer_t input_name_buffer,
		       gss_OID input_name_type,
		       gss_name_t *output_name)
{
	OM_uint32 major;

	dsyslog("Entering import_name\n");
	major = gss_import_name(minor_status, input_name_buffer,
				input_name_type, output_name);
	dsyslog("Leaving import_name\n");
	return major;
}
/*ARGSUSED*/
/* Pass-through to the mechglue's gss_release_name(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_release_name(OM_uint32 *minor_status, gss_name_t *input_name)
{
	OM_uint32 major;

	dsyslog("Entering release_name\n");
	major = gss_release_name(minor_status, input_name);
	dsyslog("Leaving release_name\n");
	return major;
}
/*ARGSUSED*/
/* Pass-through to the mechglue's gss_duplicate_name(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_duplicate_name(OM_uint32 *minor_status,
			  const gss_name_t input_name,
			  gss_name_t *output_name)
{
	OM_uint32 major;

	dsyslog("Entering duplicate_name\n");
	major = gss_duplicate_name(minor_status, input_name, output_name);
	dsyslog("Leaving duplicate_name\n");
	return major;
}
/*
 * Inquire about a SPNEGO credential.  A real credential is forwarded to
 * gss_inquire_cred() on the wrapped mech credential; for the default
 * credential, the first available non-SPNEGO mechanism is queried
 * directly to avoid recursing back into SPNEGO.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_cred(
			OM_uint32 *minor_status,
			gss_cred_id_t cred_handle,
			gss_name_t *name,
			OM_uint32 *lifetime,
			int *cred_usage,
			gss_OID_set *mechanisms)
{
	OM_uint32 status;
	spnego_gss_cred_id_t spcred = NULL;
	gss_cred_id_t creds = GSS_C_NO_CREDENTIAL;
	OM_uint32 tmp_minor_status;
	OM_uint32 initiator_lifetime, acceptor_lifetime;

	dsyslog("Entering inquire_cred\n");

	/*
	 * To avoid infinite recursion, if GSS_C_NO_CREDENTIAL is
	 * supplied we call gss_inquire_cred_by_mech() on the
	 * first non-SPNEGO mechanism.
	 */
	spcred = (spnego_gss_cred_id_t)cred_handle;
	if (spcred == NULL) {
		status = get_available_mechs(minor_status,
					     GSS_C_NO_NAME,
					     GSS_C_BOTH,
					     GSS_C_NO_CRED_STORE,
					     &creds,
					     mechanisms);
		if (status != GSS_S_COMPLETE) {
			dsyslog("Leaving inquire_cred\n");
			return (status);
		}

		if ((*mechanisms)->count == 0) {
			gss_release_cred(&tmp_minor_status, &creds);
			gss_release_oid_set(&tmp_minor_status, mechanisms);
			dsyslog("Leaving inquire_cred\n");
			return (GSS_S_DEFECTIVE_CREDENTIAL);
		}

		assert((*mechanisms)->elements != NULL);

		status = gss_inquire_cred_by_mech(minor_status,
						  creds,
						  &(*mechanisms)->elements[0],
						  name,
						  &initiator_lifetime,
						  &acceptor_lifetime,
						  cred_usage);
		if (status != GSS_S_COMPLETE) {
			gss_release_cred(&tmp_minor_status, &creds);
			dsyslog("Leaving inquire_cred\n");
			return (status);
		}

		/* Report the lifetime matching the credential's usage. */
		if (lifetime != NULL)
			*lifetime = (*cred_usage == GSS_C_ACCEPT) ?
				acceptor_lifetime : initiator_lifetime;

		gss_release_cred(&tmp_minor_status, &creds);
	} else {
		status = gss_inquire_cred(minor_status, spcred->mcred,
					  name, lifetime,
					  cred_usage, mechanisms);
	}

	dsyslog("Leaving inquire_cred\n");

	return (status);
}
/*ARGSUSED*/
/* Pass-through to the mechglue's gss_compare_name(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_compare_name(OM_uint32 *minor_status,
			const gss_name_t name1,
			const gss_name_t name2,
			int *name_equal)
{
	OM_uint32 major;

	dsyslog("Entering compare_name\n");
	major = gss_compare_name(minor_status, name1, name2, name_equal);
	dsyslog("Leaving compare_name\n");
	return major;
}
/*ARGSUSED*/
OM_uint32 KRB5_CALLCONV
spnego_gss_display_name(
			OM_uint32 *minor_status,
			gss_name_t input_name,
			gss_buffer_t output_name_buffer,
			gss_OID *output_name_type)
{
	OM_uint32 ret;

	/* Display is mechanism-independent; defer to the mechglue. */
	dsyslog("Entering display_name\n");
	ret = gss_display_name(minor_status, input_name,
			       output_name_buffer, output_name_type);
	dsyslog("Leaving display_name\n");
	return (ret);
}
/*ARGSUSED*/
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_names_for_mech(
			OM_uint32 *minor_status,
			gss_OID mechanism,
			gss_OID_set *name_types)
{
	OM_uint32 major, minor;
	gss_OID name_oids[] = {
		(gss_OID) GSS_C_NT_USER_NAME,
		(gss_OID) GSS_C_NT_MACHINE_UID_NAME,
		(gss_OID) GSS_C_NT_STRING_UID_NAME,
		(gss_OID) GSS_C_NT_HOSTBASED_SERVICE
	};
	size_t i;

	dsyslog("Entering inquire_names_for_mech\n");
	/*
	 * We only know how to handle our own mechanism.
	 */
	if ((mechanism != GSS_C_NULL_OID) &&
	    !g_OID_equal(gss_mech_spnego, mechanism)) {
		*minor_status = 0;
		return (GSS_S_FAILURE);
	}

	major = gss_create_empty_oid_set(minor_status, name_types);
	if (major == GSS_S_COMPLETE) {
		/* Add each supported name type; stop on the first error. */
		for (i = 0; i < sizeof(name_oids) / sizeof(*name_oids); i++) {
			major = gss_add_oid_set_member(minor_status,
						       name_oids[i],
						       name_types);
			if (major != GSS_S_COMPLETE)
				break;
		}
		if (major != GSS_S_COMPLETE)
			(void) gss_release_oid_set(&minor, name_types);
	}

	dsyslog("Leaving inquire_names_for_mech\n");
	return (major);
}
OM_uint32 KRB5_CALLCONV
spnego_gss_unwrap(
		OM_uint32 *minor_status,
		gss_ctx_id_t context_handle,
		gss_buffer_t input_message_buffer,
		gss_buffer_t output_message_buffer,
		int *conf_state,
		gss_qop_t *qop_state)
{
	/* SPNEGO adds nothing for per-message unwrap; defer to mechglue. */
	return (gss_unwrap(minor_status, context_handle,
			   input_message_buffer, output_message_buffer,
			   conf_state, qop_state));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap(
		OM_uint32 *minor_status,
		gss_ctx_id_t context_handle,
		int conf_req_flag,
		gss_qop_t qop_req,
		gss_buffer_t input_message_buffer,
		int *conf_state,
		gss_buffer_t output_message_buffer)
{
	/* SPNEGO adds nothing for per-message wrap; defer to mechglue. */
	return (gss_wrap(minor_status, context_handle, conf_req_flag,
			 qop_req, input_message_buffer, conf_state,
			 output_message_buffer));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_process_context_token(
				OM_uint32	*minor_status,
				const gss_ctx_id_t context_handle,
				const gss_buffer_t token_buffer)
{
	/* Pure pass-through to the mechglue. */
	return (gss_process_context_token(minor_status, context_handle,
					  token_buffer));
}
/*
 * Delete a security context.  While negotiation is still in progress the
 * handle refers to our own spnego_gss_ctx_id_rec (recognized by its magic
 * number); once negotiation completes the handle may have been replaced by
 * the selected mechanism's context, in which case we simply delegate.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_delete_sec_context(
			OM_uint32 *minor_status,
			gss_ctx_id_t *context_handle,
			gss_buffer_t output_token)
{
	OM_uint32 ret = GSS_S_COMPLETE;
	spnego_gss_ctx_id_t *ctx =
		    (spnego_gss_ctx_id_t *)context_handle;

	*minor_status = 0;

	if (context_handle == NULL)
		return (GSS_S_FAILURE);

	/* Deleting an absent context is a successful no-op. */
	if (*ctx == NULL)
		return (GSS_S_COMPLETE);

	/*
	 * If this is still an SPNEGO mech, release it locally.
	 */
	if ((*ctx)->magic_num == SPNEGO_MAGIC_ID) {
		/* Delete the inner mech context first, then free our
		 * wrapper record and null the caller's handle. */
		(void) gss_delete_sec_context(minor_status,
				    &(*ctx)->ctx_handle,
				    output_token);
		(void) release_spnego_ctx(ctx);
	} else {
		/* Already swapped for a real mech context; delegate. */
		ret = gss_delete_sec_context(minor_status,
				    context_handle,
				    output_token);
	}

	return (ret);
}
OM_uint32 KRB5_CALLCONV
spnego_gss_context_time(
			OM_uint32	*minor_status,
			const gss_ctx_id_t context_handle,
			OM_uint32	*time_rec)
{
	/* Pure pass-through to the mechglue. */
	return (gss_context_time(minor_status, context_handle, time_rec));
}
#ifndef LEAN_CLIENT
OM_uint32 KRB5_CALLCONV
spnego_gss_export_sec_context(
			    OM_uint32	  *minor_status,
			    gss_ctx_id_t *context_handle,
			    gss_buffer_t interprocess_token)
{
	/* Pure pass-through to the mechglue. */
	return (gss_export_sec_context(minor_status, context_handle,
				       interprocess_token));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_import_sec_context(
	OM_uint32		*minor_status,
	const gss_buffer_t	interprocess_token,
	gss_ctx_id_t		*context_handle)
{
	/* Pure pass-through to the mechglue. */
	return (gss_import_sec_context(minor_status, interprocess_token,
				       context_handle));
}
#endif /* LEAN_CLIENT */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_context(
			OM_uint32	*minor_status,
			const gss_ctx_id_t context_handle,
			gss_name_t	*src_name,
			gss_name_t	*targ_name,
			OM_uint32	*lifetime_rec,
			gss_OID		*mech_type,
			OM_uint32	*ctx_flags,
			int		*locally_initiated,
			int		*opened)
{
	/* Pure pass-through to the mechglue. */
	return (gss_inquire_context(minor_status, context_handle,
				    src_name, targ_name, lifetime_rec,
				    mech_type, ctx_flags,
				    locally_initiated, opened));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap_size_limit(
	OM_uint32	*minor_status,
	const gss_ctx_id_t context_handle,
	int		conf_req_flag,
	gss_qop_t	qop_req,
	OM_uint32	req_output_size,
	OM_uint32	*max_input_size)
{
	/* Pure pass-through to the mechglue. */
	return (gss_wrap_size_limit(minor_status, context_handle,
				    conf_req_flag, qop_req,
				    req_output_size, max_input_size));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_get_mic(
		OM_uint32 *minor_status,
		const gss_ctx_id_t context_handle,
		gss_qop_t qop_req,
		const gss_buffer_t message_buffer,
		gss_buffer_t message_token)
{
	/* Pure pass-through to the mechglue. */
	return (gss_get_mic(minor_status, context_handle, qop_req,
			    message_buffer, message_token));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_verify_mic(
		OM_uint32 *minor_status,
		const gss_ctx_id_t context_handle,
		const gss_buffer_t msg_buffer,
		const gss_buffer_t token_buffer,
		gss_qop_t *qop_state)
{
	/* Pure pass-through to the mechglue. */
	return (gss_verify_mic(minor_status, context_handle,
			       msg_buffer, token_buffer, qop_state));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_sec_context_by_oid(
		OM_uint32 *minor_status,
		const gss_ctx_id_t context_handle,
		const gss_OID desired_object,
		gss_buffer_set_t *data_set)
{
	/* Pure pass-through to the mechglue. */
	return (gss_inquire_sec_context_by_oid(minor_status, context_handle,
					       desired_object, data_set));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_cred_by_oid(
		OM_uint32 *minor_status,
		const gss_cred_id_t cred_handle,
		const gss_OID desired_object,
		gss_buffer_set_t *data_set)
{
	spnego_gss_cred_id_t spcred = (spnego_gss_cred_id_t)cred_handle;

	/* Unwrap the SPNEGO credential and query the mechglue cred. */
	return (gss_inquire_cred_by_oid(minor_status,
					(spcred == NULL) ? GSS_C_NO_CREDENTIAL
					: spcred->mcred,
					desired_object, data_set));
}
/*
 * Apply a credential option to the underlying mechglue credential.  If we
 * started with the default credential and the mechanism allocated a new
 * one, wrap the result in an SPNEGO credential record for the caller.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_set_cred_option(
		OM_uint32 *minor_status,
		gss_cred_id_t *cred_handle,
		const gss_OID desired_object,
		const gss_buffer_t value)
{
	OM_uint32 ret;
	OM_uint32 tmp_minor_status;
	spnego_gss_cred_id_t spcred = (spnego_gss_cred_id_t)*cred_handle;
	gss_cred_id_t mcred;

	mcred = (spcred == NULL) ? GSS_C_NO_CREDENTIAL : spcred->mcred;
	ret = gss_set_cred_option(minor_status,
				  &mcred,
				  desired_object,
				  value);
	if (ret == GSS_S_COMPLETE && spcred == NULL) {
		/*
		 * If the mechanism allocated a new credential handle, then
		 * we need to wrap it up in an SPNEGO credential handle.
		 * If it left mcred as GSS_C_NO_CREDENTIAL there is nothing
		 * to wrap; wrapping it would hand the caller a seemingly
		 * valid spcred containing no mechglue credential.
		 */
		if (mcred == GSS_C_NO_CREDENTIAL)
			return (ret);
		spcred = malloc(sizeof(spnego_gss_cred_id_rec));
		if (spcred == NULL) {
			gss_release_cred(&tmp_minor_status, &mcred);
			*minor_status = ENOMEM;
			return (GSS_S_FAILURE);
		}
		spcred->mcred = mcred;
		spcred->neg_mechs = GSS_C_NULL_OID_SET;
		*cred_handle = (gss_cred_id_t)spcred;
	}
	return (ret);
}
OM_uint32 KRB5_CALLCONV
spnego_gss_set_sec_context_option(
		OM_uint32 *minor_status,
		gss_ctx_id_t *context_handle,
		const gss_OID desired_object,
		const gss_buffer_t value)
{
	/* Pure pass-through to the mechglue. */
	return (gss_set_sec_context_option(minor_status, context_handle,
					   desired_object, value));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap_aead(OM_uint32 *minor_status,
		     gss_ctx_id_t context_handle,
		     int conf_req_flag,
		     gss_qop_t qop_req,
		     gss_buffer_t input_assoc_buffer,
		     gss_buffer_t input_payload_buffer,
		     int *conf_state,
		     gss_buffer_t output_message_buffer)
{
	/* Pure pass-through to the mechglue. */
	return (gss_wrap_aead(minor_status, context_handle, conf_req_flag,
			      qop_req, input_assoc_buffer,
			      input_payload_buffer, conf_state,
			      output_message_buffer));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_unwrap_aead(OM_uint32 *minor_status,
		       gss_ctx_id_t context_handle,
		       gss_buffer_t input_message_buffer,
		       gss_buffer_t input_assoc_buffer,
		       gss_buffer_t output_payload_buffer,
		       int *conf_state,
		       gss_qop_t *qop_state)
{
	/* Pure pass-through to the mechglue. */
	return (gss_unwrap_aead(minor_status, context_handle,
				input_message_buffer, input_assoc_buffer,
				output_payload_buffer, conf_state,
				qop_state));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap_iov(OM_uint32 *minor_status,
		    gss_ctx_id_t context_handle,
		    int conf_req_flag,
		    gss_qop_t qop_req,
		    int *conf_state,
		    gss_iov_buffer_desc *iov,
		    int iov_count)
{
	/* Pure pass-through to the mechglue. */
	return (gss_wrap_iov(minor_status, context_handle, conf_req_flag,
			     qop_req, conf_state, iov, iov_count));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_unwrap_iov(OM_uint32 *minor_status,
		      gss_ctx_id_t context_handle,
		      int *conf_state,
		      gss_qop_t *qop_state,
		      gss_iov_buffer_desc *iov,
		      int iov_count)
{
	/* Pure pass-through to the mechglue. */
	return (gss_unwrap_iov(minor_status, context_handle, conf_state,
			       qop_state, iov, iov_count));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap_iov_length(OM_uint32 *minor_status,
			   gss_ctx_id_t context_handle,
			   int conf_req_flag,
			   gss_qop_t qop_req,
			   int *conf_state,
			   gss_iov_buffer_desc *iov,
			   int iov_count)
{
	/* Pure pass-through to the mechglue. */
	return (gss_wrap_iov_length(minor_status, context_handle,
				    conf_req_flag, qop_req, conf_state,
				    iov, iov_count));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_complete_auth_token(
		OM_uint32 *minor_status,
		const gss_ctx_id_t context_handle,
		gss_buffer_t input_message_buffer)
{
	/* Pure pass-through to the mechglue. */
	return (gss_complete_auth_token(minor_status, context_handle,
					input_message_buffer));
}
/*
 * Acquire a credential by impersonation and wrap the resulting mechglue
 * credential in an SPNEGO credential record.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_acquire_cred_impersonate_name(OM_uint32 *minor_status,
					 const gss_cred_id_t impersonator_cred_handle,
					 const gss_name_t desired_name,
					 OM_uint32 time_req,
					 gss_OID_set desired_mechs,
					 gss_cred_usage_t cred_usage,
					 gss_cred_id_t *output_cred_handle,
					 gss_OID_set *actual_mechs,
					 OM_uint32 *time_rec)
{
	OM_uint32 status, tmpmin;
	gss_OID_set amechs = GSS_C_NULL_OID_SET;
	spnego_gss_cred_id_t imp_spcred = NULL, out_spcred = NULL;
	gss_cred_id_t imp_mcred, out_mcred = GSS_C_NO_CREDENTIAL;

	dsyslog("Entering spnego_gss_acquire_cred_impersonate_name\n");

	if (actual_mechs)
		*actual_mechs = NULL;
	if (time_rec)
		*time_rec = 0;

	/* Unwrap the impersonator's SPNEGO credential, if any. */
	imp_spcred = (spnego_gss_cred_id_t)impersonator_cred_handle;
	imp_mcred = imp_spcred ? imp_spcred->mcred : GSS_C_NO_CREDENTIAL;
	if (desired_mechs == GSS_C_NO_OID_SET) {
		/* Default to the mechanisms of the impersonator's cred. */
		status = gss_inquire_cred(minor_status, imp_mcred, NULL, NULL,
					  NULL, &amechs);
		if (status != GSS_S_COMPLETE)
			return status;
		desired_mechs = amechs;
	}

	status = gss_acquire_cred_impersonate_name(minor_status, imp_mcred,
						   desired_name, time_req,
						   desired_mechs, cred_usage,
						   &out_mcred, actual_mechs,
						   time_rec);

	if (amechs != GSS_C_NULL_OID_SET)
		(void) gss_release_oid_set(&tmpmin, &amechs);

	/*
	 * Previously out_mcred was wrapped (and, on malloc failure,
	 * released) even when the acquire call failed, reading an
	 * uninitialized credential handle.  Bail out on failure.
	 */
	if (status != GSS_S_COMPLETE)
		return (status);

	/* Wrap the mechglue credential in an SPNEGO credential record. */
	out_spcred = malloc(sizeof(spnego_gss_cred_id_rec));
	if (out_spcred == NULL) {
		gss_release_cred(minor_status, &out_mcred);
		*minor_status = ENOMEM;
		return (GSS_S_FAILURE);
	}
	out_spcred->mcred = out_mcred;
	out_spcred->neg_mechs = GSS_C_NULL_OID_SET;
	*output_cred_handle = (gss_cred_id_t)out_spcred;

	dsyslog("Leaving spnego_gss_acquire_cred_impersonate_name\n");
	return (status);
}
/*
 * Acquire a password-based credential for every available non-SPNEGO
 * mechanism and wrap the resulting mechglue credential in an SPNEGO
 * credential record.  On success the caller owns *output_cred_handle.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_acquire_cred_with_password(OM_uint32 *minor_status,
				      const gss_name_t desired_name,
				      const gss_buffer_t password,
				      OM_uint32 time_req,
				      const gss_OID_set desired_mechs,
				      gss_cred_usage_t cred_usage,
				      gss_cred_id_t *output_cred_handle,
				      gss_OID_set *actual_mechs,
				      OM_uint32 *time_rec)
{
	OM_uint32 status, tmpmin;
	gss_OID_set amechs = GSS_C_NULL_OID_SET;
	gss_cred_id_t mcred = NULL;
	spnego_gss_cred_id_t spcred = NULL;

	dsyslog("Entering spnego_gss_acquire_cred_with_password\n");

	if (actual_mechs)
		*actual_mechs = NULL;

	if (time_rec)
		*time_rec = 0;

	/* Note: desired_mechs is intentionally ignored; we always use the
	 * full set of available non-SPNEGO mechanisms. */
	status = get_available_mechs(minor_status, desired_name,
				     cred_usage, GSS_C_NO_CRED_STORE,
				     NULL, &amechs);
	if (status != GSS_S_COMPLETE)
		goto cleanup;

	status = gss_acquire_cred_with_password(minor_status, desired_name,
						password, time_req, amechs,
						cred_usage, &mcred,
						actual_mechs, time_rec);
	if (status != GSS_S_COMPLETE)
		goto cleanup;

	/* Wrap the mechglue credential in an SPNEGO credential record. */
	spcred = malloc(sizeof(spnego_gss_cred_id_rec));
	if (spcred == NULL) {
		*minor_status = ENOMEM;
		status = GSS_S_FAILURE;
		goto cleanup;
	}
	spcred->neg_mechs = GSS_C_NULL_OID_SET;
	spcred->mcred = mcred;
	/* Ownership of mcred transferred to spcred; prevent the cleanup
	 * release below from freeing it. */
	mcred = GSS_C_NO_CREDENTIAL;
	*output_cred_handle = (gss_cred_id_t)spcred;

cleanup:
	(void) gss_release_oid_set(&tmpmin, &amechs);
	(void) gss_release_cred(&tmpmin, &mcred);

	dsyslog("Leaving spnego_gss_acquire_cred_with_password\n");
	return (status);
}
OM_uint32 KRB5_CALLCONV
spnego_gss_display_name_ext(OM_uint32 *minor_status,
			    gss_name_t name,
			    gss_OID display_as_name_type,
			    gss_buffer_t display_name)
{
	/* Pure pass-through to the mechglue. */
	return (gss_display_name_ext(minor_status, name,
				     display_as_name_type, display_name));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_name(OM_uint32 *minor_status,
			gss_name_t name,
			int *name_is_MN,
			gss_OID *MN_mech,
			gss_buffer_set_t *attrs)
{
	/* Pure pass-through to the mechglue. */
	return (gss_inquire_name(minor_status, name, name_is_MN,
				 MN_mech, attrs));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_get_name_attribute(OM_uint32 *minor_status,
			      gss_name_t name,
			      gss_buffer_t attr,
			      int *authenticated,
			      int *complete,
			      gss_buffer_t value,
			      gss_buffer_t display_value,
			      int *more)
{
	/* Pure pass-through to the mechglue. */
	return (gss_get_name_attribute(minor_status, name, attr,
				       authenticated, complete, value,
				       display_value, more));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_set_name_attribute(OM_uint32 *minor_status,
			      gss_name_t name,
			      int complete,
			      gss_buffer_t attr,
			      gss_buffer_t value)
{
	/* Pure pass-through to the mechglue. */
	return (gss_set_name_attribute(minor_status, name, complete,
				       attr, value));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_delete_name_attribute(OM_uint32 *minor_status,
				 gss_name_t name,
				 gss_buffer_t attr)
{
	/* Pure pass-through to the mechglue. */
	return (gss_delete_name_attribute(minor_status, name, attr));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_export_name_composite(OM_uint32 *minor_status,
				 gss_name_t name,
				 gss_buffer_t exp_composite_name)
{
	/* Pure pass-through to the mechglue. */
	return (gss_export_name_composite(minor_status, name,
					  exp_composite_name));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_map_name_to_any(OM_uint32 *minor_status,
			   gss_name_t name,
			   int authenticated,
			   gss_buffer_t type_id,
			   gss_any_t *output)
{
	/* Pure pass-through to the mechglue. */
	return (gss_map_name_to_any(minor_status, name, authenticated,
				    type_id, output));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_release_any_name_mapping(OM_uint32 *minor_status,
				    gss_name_t name,
				    gss_buffer_t type_id,
				    gss_any_t *input)
{
	/* Pure pass-through to the mechglue. */
	return (gss_release_any_name_mapping(minor_status, name,
					     type_id, input));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_pseudo_random(OM_uint32 *minor_status,
			 gss_ctx_id_t context,
			 int prf_key,
			 const gss_buffer_t prf_in,
			 ssize_t desired_output_len,
			 gss_buffer_t prf_out)
{
	/* Pure pass-through to the mechglue. */
	return (gss_pseudo_random(minor_status, context, prf_key, prf_in,
				  desired_output_len, prf_out));
}
/*
 * Record the set of mechanisms the caller is willing to negotiate for this
 * credential; consulted later by get_negotiable_mechs().
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_set_neg_mechs(OM_uint32 *minor_status,
			 gss_cred_id_t cred_handle,
			 const gss_OID_set mech_list)
{
	OM_uint32 ret;
	spnego_gss_cred_id_t spcred = (spnego_gss_cred_id_t)cred_handle;

	/*
	 * There is nowhere to store a mech list on the default credential;
	 * previously a NULL handle was dereferenced here and crashed.
	 */
	if (spcred == NULL) {
		*minor_status = 0;
		return (GSS_S_NO_CRED);
	}

	/* Store mech_list in spcred for use in negotiation logic. */
	gss_release_oid_set(minor_status, &spcred->neg_mechs);
	ret = generic_gss_copy_oid_set(minor_status, mech_list,
				       &spcred->neg_mechs);
	return (ret);
}
#define SPNEGO_SASL_NAME "SPNEGO"
#define SPNEGO_SASL_NAME_LEN (sizeof(SPNEGO_SASL_NAME) - 1)
/*
 * Map the SASL mechanism name "SPNEGO" to the SPNEGO mechanism OID.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_mech_for_saslname(OM_uint32 *minor_status,
				     const gss_buffer_t sasl_mech_name,
				     gss_OID *mech_type)
{
	/* Set the minor status on all paths; it was previously left
	 * uninitialized for the caller to read. */
	*minor_status = 0;
	if (sasl_mech_name->length == SPNEGO_SASL_NAME_LEN &&
	    memcmp(sasl_mech_name->value, SPNEGO_SASL_NAME,
		   SPNEGO_SASL_NAME_LEN) == 0) {
		if (mech_type != NULL)
			*mech_type = (gss_OID)gss_mech_spnego;
		return (GSS_S_COMPLETE);
	}

	return (GSS_S_BAD_MECH);
}
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_saslname_for_mech(OM_uint32 *minor_status,
				     const gss_OID desired_mech,
				     gss_buffer_t sasl_mech_name,
				     gss_buffer_t mech_name,
				     gss_buffer_t mech_description)
{
	*minor_status = 0;

	/* We can only describe the SPNEGO mechanism itself. */
	if (!g_OID_equal(desired_mech, gss_mech_spnego))
		return (GSS_S_BAD_MECH);

	/* Fill all three descriptive buffers; fail only on OOM. */
	if (g_make_string_buffer(SPNEGO_SASL_NAME, sasl_mech_name) &&
	    g_make_string_buffer("spnego", mech_name) &&
	    g_make_string_buffer("Simple and Protected GSS-API "
				 "Negotiation Mechanism", mech_description))
		return (GSS_S_COMPLETE);

	*minor_status = ENOMEM;
	return (GSS_S_FAILURE);
}
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_attrs_for_mech(OM_uint32 *minor_status,
				  gss_const_OID mech,
				  gss_OID_set *mech_attrs,
				  gss_OID_set *known_mech_attrs)
{
	OM_uint32 major, tmpMinor;
	gss_OID attrs[] = { (gss_OID)GSS_C_MA_MECH_NEGO,
			    (gss_OID)GSS_C_MA_ITOK_FRAMED };
	size_t i;

	/* known_mech_attrs is handled by mechglue */
	*minor_status = 0;

	if (mech_attrs == NULL)
		return (GSS_S_COMPLETE);

	major = gss_create_empty_oid_set(minor_status, mech_attrs);
	for (i = 0; !GSS_ERROR(major) &&
	     i < sizeof(attrs) / sizeof(*attrs); i++) {
		major = gss_add_oid_set_member(minor_status, attrs[i],
					       mech_attrs);
	}
	if (GSS_ERROR(major))
		gss_release_oid_set(&tmpMinor, mech_attrs);
	return (major);
}
OM_uint32 KRB5_CALLCONV
spnego_gss_export_cred(OM_uint32 *minor_status,
		       gss_cred_id_t cred_handle,
		       gss_buffer_t token)
{
	/* Export the wrapped mechglue credential; the SPNEGO record
	 * itself carries no additional exportable state. */
	spnego_gss_cred_id_t spcred = (spnego_gss_cred_id_t)cred_handle;

	return (gss_export_cred(minor_status, spcred->mcred, token));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_import_cred(OM_uint32 *minor_status,
		       gss_buffer_t token,
		       gss_cred_id_t *cred_handle)
{
	OM_uint32 major;
	spnego_gss_cred_id_t spcred;
	gss_cred_id_t mcred;

	/* Import the mechglue credential first. */
	major = gss_import_cred(minor_status, token, &mcred);
	if (GSS_ERROR(major))
		return (major);

	/* Then wrap it in a fresh SPNEGO credential record. */
	spcred = malloc(sizeof(*spcred));
	if (spcred == NULL) {
		gss_release_cred(minor_status, &mcred);
		*minor_status = ENOMEM;
		return (GSS_S_FAILURE);
	}
	spcred->mcred = mcred;
	spcred->neg_mechs = GSS_C_NULL_OID_SET;
	*cred_handle = (gss_cred_id_t)spcred;
	return (major);
}
OM_uint32 KRB5_CALLCONV
spnego_gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
		       gss_qop_t qop_req, gss_iov_buffer_desc *iov,
		       int iov_count)
{
	/* Pure pass-through to the mechglue. */
	return (gss_get_mic_iov(minor_status, context_handle, qop_req,
				iov, iov_count));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_verify_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
			  gss_qop_t *qop_state, gss_iov_buffer_desc *iov,
			  int iov_count)
{
	/* Pure pass-through to the mechglue. */
	return (gss_verify_mic_iov(minor_status, context_handle, qop_state,
				   iov, iov_count));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_get_mic_iov_length(OM_uint32 *minor_status,
			      gss_ctx_id_t context_handle, gss_qop_t qop_req,
			      gss_iov_buffer_desc *iov, int iov_count)
{
	/* Pure pass-through to the mechglue. */
	return (gss_get_mic_iov_length(minor_status, context_handle, qop_req,
				       iov, iov_count));
}
/*
* We will release everything but the ctx_handle so that it
* can be passed back to init/accept context. This routine should
* not be called until after the ctx_handle memory is assigned to
* the supplied context handle from init/accept context.
*/
static void
release_spnego_ctx(spnego_gss_ctx_id_t *ctx)
{
	OM_uint32 tmpmin;
	spnego_gss_ctx_id_t sc = *ctx;

	/* Nothing to do for an already-released context. */
	if (sc == NULL)
		return;

	/* Free every member except ctx_handle, which has been handed
	 * off to the caller of init/accept context. */
	(void) gss_release_buffer(&tmpmin, &sc->DER_mechTypes);
	(void) gss_release_oid_set(&tmpmin, &sc->mech_set);
	(void) gss_release_name(&tmpmin, &sc->internal_name);
	free(sc->optionStr);
	sc->optionStr = NULL;

	free(sc);
	*ctx = NULL;
}
/*
* Can't use gss_indicate_mechs by itself to get available mechs for
* SPNEGO because it will also return the SPNEGO mech and we do not
* want to consider SPNEGO as an available security mech for
* negotiation. For this reason, get_available_mechs will return
* all available mechs except SPNEGO.
*
* If a ptr to a creds list is given, this function will attempt
* to acquire creds for the creds given and trim the list of
* returned mechanisms to only those for which creds are valid.
*
*/
/*
 * Build the set of mechanisms (excluding SPNEGO itself) available for
 * negotiation.  If creds is non-NULL, also acquire credentials and trim
 * the returned set to mechanisms for which credentials could be obtained.
 * On success the caller owns *rmechs (and *creds when requested).
 */
static OM_uint32
get_available_mechs(OM_uint32 *minor_status,
	gss_name_t name, gss_cred_usage_t usage,
	gss_const_key_value_set_t cred_store,
	gss_cred_id_t *creds, gss_OID_set *rmechs)
{
	unsigned int i;
	int found = 0;
	OM_uint32 major_status = GSS_S_COMPLETE, tmpmin;
	gss_OID_set mechs, goodmechs;

	major_status = gss_indicate_mechs(minor_status, &mechs);
	if (major_status != GSS_S_COMPLETE) {
		return (major_status);
	}

	major_status = gss_create_empty_oid_set(minor_status, rmechs);
	if (major_status != GSS_S_COMPLETE) {
		(void) gss_release_oid_set(minor_status, &mechs);
		return (major_status);
	}

	/* Copy every indicated mechanism except SPNEGO's own OID. */
	for (i = 0; i < mechs->count && major_status == GSS_S_COMPLETE; i++) {
		if ((mechs->elements[i].length
		    != spnego_mechanism.mech_type.length) ||
		    memcmp(mechs->elements[i].elements,
			spnego_mechanism.mech_type.elements,
			spnego_mechanism.mech_type.length)) {
			major_status = gss_add_oid_set_member(minor_status,
							      &mechs->elements[i],
							      rmechs);
			if (major_status == GSS_S_COMPLETE)
				found++;
		}
	}

	/*
	 * If the caller wanted a list of creds returned,
	 * trim the list of mechanisms down to only those
	 * for which the creds are valid.
	 */
	if (found > 0 && major_status == GSS_S_COMPLETE && creds != NULL) {
		major_status = gss_acquire_cred_from(minor_status, name,
						     GSS_C_INDEFINITE,
						     *rmechs, usage,
						     cred_store, creds,
						     &goodmechs, NULL);

		/*
		 * Drop the old list in favor of the new
		 * "trimmed" list.
		 */
		(void) gss_release_oid_set(&tmpmin, rmechs);
		if (major_status == GSS_S_COMPLETE) {
			(void) gssint_copy_oid_set(&tmpmin,
					goodmechs, rmechs);
			(void) gss_release_oid_set(&tmpmin, &goodmechs);
		}
	}

	(void) gss_release_oid_set(&tmpmin, &mechs);
	/* Report failure if nothing usable survived the filtering. */
	if (found == 0 || major_status != GSS_S_COMPLETE) {
		*minor_status = ERR_SPNEGO_NO_MECHS_AVAILABLE;
		map_errcode(minor_status);
		if (major_status == GSS_S_COMPLETE)
			major_status = GSS_S_FAILURE;
	}

	return (major_status);
}
/*
* Return a list of mechanisms we are willing to negotiate for a credential,
* taking into account the mech set provided with gss_set_neg_mechs if it
* exists.
*/
/*
 * Return (in *rmechs, owned by the caller) the ordered list of mechanisms
 * we are willing to negotiate for spcred.  With a default credential this
 * is every available mech (trimmed by acquirable creds when initiating);
 * otherwise it is the cred's mechs, intersected with any gss_set_neg_mechs
 * list while preserving the neg_mechs ordering.
 */
static OM_uint32
get_negotiable_mechs(OM_uint32 *minor_status, spnego_gss_cred_id_t spcred,
		     gss_cred_usage_t usage, gss_OID_set *rmechs)
{
	OM_uint32 ret, tmpmin;
	gss_cred_id_t creds = GSS_C_NO_CREDENTIAL, *credptr;
	gss_OID_set cred_mechs = GSS_C_NULL_OID_SET;
	gss_OID_set intersect_mechs = GSS_C_NULL_OID_SET;
	unsigned int i;
	int present;

	if (spcred == NULL) {
		/*
		 * The default credentials were supplied. Return a list of all
		 * available mechs except SPNEGO. When initiating, trim this
		 * list to mechs we can acquire credentials for.
		 */
		credptr = (usage == GSS_C_INITIATE) ? &creds : NULL;
		ret = get_available_mechs(minor_status, GSS_C_NO_NAME, usage,
					  GSS_C_NO_CRED_STORE, credptr,
					  rmechs);
		/* The acquired creds are only needed for trimming. */
		gss_release_cred(&tmpmin, &creds);
		return (ret);
	}

	/* Get the list of mechs in the mechglue cred. */
	ret = gss_inquire_cred(minor_status, spcred->mcred, NULL, NULL, NULL,
			       &cred_mechs);
	if (ret != GSS_S_COMPLETE)
		return (ret);
	if (spcred->neg_mechs == GSS_C_NULL_OID_SET) {
		/* gss_set_neg_mechs was never called; return cred_mechs. */
		*rmechs = cred_mechs;
		*minor_status = 0;
		return (GSS_S_COMPLETE);
	}

	/* Compute the intersection of cred_mechs and spcred->neg_mechs,
	 * preserving the order in spcred->neg_mechs. */
	ret = gss_create_empty_oid_set(minor_status, &intersect_mechs);
	if (ret != GSS_S_COMPLETE) {
		gss_release_oid_set(&tmpmin, &cred_mechs);
		return (ret);
	}
	for (i = 0; i < spcred->neg_mechs->count; i++) {
		gss_test_oid_set_member(&tmpmin,
					&spcred->neg_mechs->elements[i],
					cred_mechs, &present);
		if (!present)
			continue;
		ret = gss_add_oid_set_member(minor_status,
					     &spcred->neg_mechs->elements[i],
					     &intersect_mechs);
		if (ret != GSS_S_COMPLETE)
			break;
	}

	gss_release_oid_set(&tmpmin, &cred_mechs);
	/* An empty intersection means we have nothing to negotiate. */
	if (intersect_mechs->count == 0 || ret != GSS_S_COMPLETE) {
		gss_release_oid_set(&tmpmin, &intersect_mechs);
		*minor_status = ERR_SPNEGO_NO_MECHS_AVAILABLE;
		map_errcode(minor_status);
		return (GSS_S_FAILURE);
	}

	*rmechs = intersect_mechs;
	*minor_status = 0;
	return (GSS_S_COMPLETE);
}
/* following are token creation and reading routines */
/*
* If buff_in is not pointing to a MECH_OID, then return NULL and do not
* advance the buffer, otherwise, decode the mech_oid from the buffer and
* place in gss_OID.
*/
/*
 * If buff_in is not pointing to a MECH_OID, return NULL without advancing
 * the cursor; otherwise decode the DER-encoded OID into a freshly
 * allocated gss_OID (caller frees via generic_gss_release_oid) and
 * advance *buff_in past it.  length is the number of bytes available at
 * *buff_in.
 */
static gss_OID
get_mech_oid(OM_uint32 *minor_status, unsigned char **buff_in, size_t length)
{
	OM_uint32	status;
	gss_OID_desc 	toid;
	gss_OID		mech_out = NULL;
	unsigned char		*start, *end;

	/*
	 * We consume the tag byte and the one-byte length unconditionally
	 * below, so at least two bytes must be present.  The previous
	 * "length < 1" check allowed the length byte to be read one past
	 * the end of the buffer.
	 */
	if (length < 2 || **buff_in != MECH_OID)
		return (NULL);

	start = *buff_in;
	end = start + length;

	(*buff_in)++;
	toid.length = *(*buff_in)++;

	/* The OID body must also fit inside the remaining bytes. */
	if ((*buff_in + toid.length) > end)
		return (NULL);

	toid.elements = *buff_in;
	*buff_in += toid.length;

	status = generic_gss_copy_oid(minor_status, &toid, &mech_out);

	if (status != GSS_S_COMPLETE) {
		map_errcode(minor_status);
		mech_out = NULL;
	}

	return (mech_out);
}
/*
* der encode the given mechanism oid into buf_out, advancing the
* buffer pointer.
*/
/*
 * DER-encode mech into *buf_out (tag, one-byte length, body), advancing
 * the cursor.  Returns 0 on success, -1 if buflen is too small.
 */
static int
put_mech_oid(unsigned char **buf_out, gss_OID_const mech, unsigned int buflen)
{
	unsigned char *p = *buf_out;

	/* Tag byte + length byte + OID body must all fit. */
	if (buflen < mech->length + 2)
		return (-1);
	*p++ = MECH_OID;
	*p++ = (unsigned char) mech->length;
	memcpy(p, mech->elements, mech->length);
	*buf_out = p + mech->length;
	return (0);
}
/*
* verify that buff_in points to an octet string, if it does not,
* return NULL and don't advance the pointer. If it is an octet string
* decode buff_in into a gss_buffer_t and return it, advancing the
* buffer pointer.
*/
/*
 * verify that buff_in points to an octet string, if it does not,
 * return NULL and don't advance the pointer. If it is an octet string
 * decode buff_in into a gss_buffer_t and return it, advancing the
 * buffer pointer.  The returned buffer and its value are heap-allocated;
 * the caller owns both (note the descriptor itself comes from malloc()
 * while the value comes from gssalloc_malloc()).
 */
static gss_buffer_t
get_input_token(unsigned char **buff_in, unsigned int buff_length)
{
	gss_buffer_t input_token;
	unsigned int len;

	/* Consumes the OCTET_STRING tag and DER length, leaving *buff_in
	 * at the string body and len set to its length. */
	if (g_get_tag_and_length(buff_in, OCTET_STRING, buff_length, &len) < 0)
		return (NULL);

	input_token = (gss_buffer_t)malloc(sizeof (gss_buffer_desc));
	if (input_token == NULL)
		return (NULL);

	input_token->length = len;
	if (input_token->length > 0) {
		input_token->value = gssalloc_malloc(input_token->length);
		if (input_token->value == NULL) {
			free(input_token);
			return (NULL);
		}

		memcpy(input_token->value, *buff_in, input_token->length);
	} else {
		/* Zero-length strings get a NULL value pointer. */
		input_token->value = NULL;
	}
	*buff_in += input_token->length;
	return (input_token);
}
/*
* verify that the input token length is not 0. If it is, just return.
* If the token length is greater than 0, der encode as an octet string
* and place in buf_out, advancing buf_out.
*/
/*
 * verify that the input token length is not 0. If it is, just return.
 * If the token length is greater than 0, der encode as an octet string
 * and place in buf_out, advancing buf_out.  Returns 0 on success or a
 * negative value if the token does not fit in buflen bytes.
 */
static int
put_input_token(unsigned char **buf_out, gss_buffer_t input_token,
		unsigned int buflen)
{
	int ret;

	/* if token length is 0, we do not want to send */
	if (input_token->length == 0)
		return (0);

	if (input_token->length > buflen)
		return (-1);

	*(*buf_out)++ = OCTET_STRING;
	/* NOTE(review): input_token->length is passed as the remaining
	 * buffer size here; the buflen check above makes this safe, but
	 * confirm this matches gssint_put_der_length's contract. */
	if ((ret = gssint_put_der_length(input_token->length, buf_out,
			    input_token->length)))
		return (ret);
	TWRITE_STR(*buf_out, input_token->value, input_token->length);
	return (0);
}
/*
* verify that buff_in points to a sequence of der encoding. The mech
* set is the only sequence of encoded object in the token, so if it is
* a sequence of encoding, decode the mechset into a gss_OID_set and
* return it, advancing the buffer pointer.
*/
/*
 * verify that buff_in points to a sequence of der encoding. The mech
 * set is the only sequence of encoded object in the token, so if it is
 * a sequence of encoding, decode the mechset into a gss_OID_set and
 * return it, advancing the buffer pointer.  The caller owns the returned
 * set; NULL is returned on any decoding or allocation failure.
 */
static gss_OID_set
get_mech_set(OM_uint32 *minor_status, unsigned char **buff_in,
	     unsigned int buff_length)
{
	gss_OID_set returned_mechSet;
	OM_uint32 major_status;
	int length;
	unsigned int bytes;
	OM_uint32 set_length;
	unsigned char		*start;
	int i;

	if (**buff_in != SEQUENCE_OF)
		return (NULL);

	start = *buff_in;
	(*buff_in)++;

	/* length is the DER-encoded byte count of the sequence body. */
	length = gssint_get_der_length(buff_in, buff_length, &bytes);
	if (length < 0 || buff_length - bytes < (unsigned int)length)
		return NULL;

	major_status = gss_create_empty_oid_set(minor_status,
						&returned_mechSet);
	if (major_status != GSS_S_COMPLETE)
		return (NULL);

	/* Walk the sequence, decoding one OID per iteration until the
	 * accumulated encoded size (tag + length + body per OID) covers
	 * the declared sequence length or decoding fails. */
	for (set_length = 0, i = 0; set_length < (unsigned int)length; i++) {
		gss_OID_desc *temp = get_mech_oid(minor_status, buff_in,
			buff_length - (*buff_in - start));
		if (temp == NULL)
			break;

		major_status = gss_add_oid_set_member(minor_status,
						      temp, &returned_mechSet);
		if (major_status == GSS_S_COMPLETE) {
			/* +2 accounts for the MECH_OID tag and length byte. */
			set_length += returned_mechSet->elements[i].length +2;
			if (generic_gss_release_oid(minor_status, &temp))
				map_errcode(minor_status);
		}
	}

	return (returned_mechSet);
}
/*
* Encode mechSet into buf.
*/
/*
 * Encode mechSet into buf as a DER SEQUENCE OF OID.  On success buf owns
 * a freshly gssalloc_malloc'ed value; on failure -1 is returned and buf
 * may hold a partially written allocation for the caller to release.
 */
static int
put_mech_set(gss_OID_set mechSet, gss_buffer_t buf)
{
	unsigned char *ptr;
	unsigned int i;
	unsigned int tlen, ilen;

	tlen = ilen = 0;
	/* First pass: compute the total encoded size of the member OIDs. */
	for (i = 0; i < mechSet->count; i++) {
		/*
		 * 0x06 [DER LEN] [OID]
		 */
		ilen += 1 +
			gssint_der_length_size(mechSet->elements[i].length) +
			mechSet->elements[i].length;
	}
	/*
	 * 0x30 [DER LEN]
	 */
	tlen = 1 + gssint_der_length_size(ilen) + ilen;
	ptr = gssalloc_malloc(tlen);
	if (ptr == NULL)
		return -1;

	buf->value = ptr;
	buf->length = tlen;
/* Bytes still unwritten; ptr stays fixed while the cursor advances
 * through buf->value. */
#define REMAIN (buf->length - ((unsigned char *)buf->value - ptr))

	/* Second pass: emit the sequence header, then each OID. */
	*ptr++ = SEQUENCE_OF;
	if (gssint_put_der_length(ilen, &ptr, REMAIN) < 0)
		return -1;
	for (i = 0; i < mechSet->count; i++) {
		if (put_mech_oid(&ptr, &mechSet->elements[i], REMAIN) < 0) {
			return -1;
		}
	}
	return 0;
#undef REMAIN
}
/*
* Verify that buff_in is pointing to a BIT_STRING with the correct
* length and padding for the req_flags. If it is, decode req_flags
* and return them, otherwise, return NULL.
*/
/*
 * Verify that buff_in is pointing to a BIT_STRING with the correct
 * length and padding for the req_flags. If it is, decode req_flags
 * and return them, otherwise return an error.  A missing (optional)
 * req-flags element is not an error and leaves *req_flags untouched.
 */
static OM_uint32
get_req_flags(unsigned char **buff_in, OM_uint32 bodysize,
	      OM_uint32 *req_flags)
{
	unsigned int len;

	/* The reqFlags element is optional; absence is success. */
	if (**buff_in != (CONTEXT | 0x01))
		return (0);

	if (g_get_tag_and_length(buff_in, (CONTEXT | 0x01),
				bodysize, &len) < 0)
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * We consume four bytes below (BIT_STRING tag, length, padding,
	 * value); reject a shorter element rather than reading past it.
	 */
	if (len < 4)
		return GSS_S_DEFECTIVE_TOKEN;

	if (*(*buff_in)++ != BIT_STRING)
		return GSS_S_DEFECTIVE_TOKEN;

	if (*(*buff_in)++ != BIT_STRING_LENGTH)
		return GSS_S_DEFECTIVE_TOKEN;

	if (*(*buff_in)++ != BIT_STRING_PADDING)
		return GSS_S_DEFECTIVE_TOKEN;

	*req_flags = (OM_uint32) (*(*buff_in)++ >> 1);
	return (0);
}
/*
 * Decode an initial SPNEGO negTokenInit token from buf.  On success fills
 * in: der_mechSet (raw DER bytes of the mech list, for later MIC checks),
 * mechSet (decoded OID set), req_flags, and the optional mechtok and
 * mechListMIC sub-tokens (heap-allocated; caller owns all outputs).
 */
static OM_uint32
get_negTokenInit(OM_uint32 *minor_status,
		 gss_buffer_t buf,
		 gss_buffer_t der_mechSet,
		 gss_OID_set *mechSet,
		 OM_uint32 *req_flags,
		 gss_buffer_t *mechtok,
		 gss_buffer_t *mechListMIC)
{
	OM_uint32 err;
	unsigned char *ptr, *bufstart;
	unsigned int len;
	gss_buffer_desc tmpbuf;

	/* Initialize every output so failure paths leave clean state. */
	*minor_status = 0;
	der_mechSet->length = 0;
	der_mechSet->value = NULL;
	*mechSet = GSS_C_NO_OID_SET;
	*req_flags = 0;
	*mechtok = *mechListMIC = GSS_C_NO_BUFFER;

	ptr = bufstart = buf->value;
	if ((buf->length - (ptr - bufstart)) > INT_MAX)
		return GSS_S_FAILURE;
/* Unconsumed bytes at the current cursor position. */
#define REMAIN (buf->length - (ptr - bufstart))

	/* Strip the outer GSS framing carrying the SPNEGO OID. */
	err = g_verify_token_header(gss_mech_spnego,
				    &len, &ptr, 0, REMAIN);
	if (err) {
		*minor_status = err;
		map_errcode(minor_status);
		return GSS_S_FAILURE;
	}

	/* Then the negTokenInit wrapper itself. */
	*minor_status = g_verify_neg_token_init(&ptr, REMAIN);
	if (*minor_status) {
		map_errcode(minor_status);
		return GSS_S_FAILURE;
	}

	/* alias into input_token */
	tmpbuf.value = ptr;
	tmpbuf.length = REMAIN;
	*mechSet = get_mech_set(minor_status, &ptr, REMAIN);
	if (*mechSet == NULL)
		return GSS_S_FAILURE;

	/* Keep a private copy of the exact DER bytes of the mech list;
	 * they are needed verbatim for mechListMIC verification. */
	tmpbuf.length = ptr - (unsigned char *)tmpbuf.value;
	der_mechSet->value = gssalloc_malloc(tmpbuf.length);
	if (der_mechSet->value == NULL)
		return GSS_S_FAILURE;
	memcpy(der_mechSet->value, tmpbuf.value, tmpbuf.length);
	der_mechSet->length = tmpbuf.length;

	err = get_req_flags(&ptr, REMAIN, req_flags);
	if (err != GSS_S_COMPLETE) {
		return err;
	}
	/* Optional [2] mechToken. */
	if (g_get_tag_and_length(&ptr, (CONTEXT | 0x02),
				 REMAIN, &len) >= 0) {
		*mechtok = get_input_token(&ptr, len);
		if (*mechtok == GSS_C_NO_BUFFER) {
			return GSS_S_FAILURE;
		}
	}
	/* Optional [3] mechListMIC. */
	if (g_get_tag_and_length(&ptr, (CONTEXT | 0x03),
				 REMAIN, &len) >= 0) {
		*mechListMIC = get_input_token(&ptr, len);
		if (*mechListMIC == GSS_C_NO_BUFFER) {
			return GSS_S_FAILURE;
		}
	}
	return GSS_S_COMPLETE;
#undef REMAIN
}
/*
 * Parse a SPNEGO NegTokenResp message out of buf/buflen.  All of the
 * fields are OPTIONAL per RFC 4178; each out-parameter is filled in only
 * when the corresponding field is present:
 *
 *   negState       - defaults to ACCEPT_DEFECTIVE_TOKEN when absent
 *   supportedMech  - allocated OID, caller frees
 *   responseToken  - allocated buffer, caller frees
 *   mechListMIC    - allocated buffer, caller frees
 *
 * Returns GSS_S_COMPLETE on success, GSS_S_DEFECTIVE_TOKEN on any
 * malformed input.
 */
static OM_uint32
get_negTokenResp(OM_uint32 *minor_status,
		 unsigned char *buf, unsigned int buflen,
		 OM_uint32 *negState,
		 gss_OID *supportedMech,
		 gss_buffer_t *responseToken,
		 gss_buffer_t *mechListMIC)
{
	unsigned char *ptr, *bufstart;
	unsigned int len;
	int tmplen;
	unsigned int tag, bytes;

	*negState = ACCEPT_DEFECTIVE_TOKEN;
	*supportedMech = GSS_C_NO_OID;
	*responseToken = *mechListMIC = GSS_C_NO_BUFFER;
	ptr = bufstart = buf;
#define REMAIN (buflen - (ptr - bufstart))

	if (g_get_tag_and_length(&ptr, (CONTEXT | 0x01), REMAIN, &len) < 0)
		return GSS_S_DEFECTIVE_TOKEN;
	/*
	 * Bounds-check before dereferencing: the [1] wrapper may claim a
	 * zero-length body, in which case reading *ptr below would run
	 * off the end of the input buffer.
	 */
	if (REMAIN < 1)
		return GSS_S_DEFECTIVE_TOKEN;
	if (*ptr++ == SEQUENCE) {
		tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
		if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
			return GSS_S_DEFECTIVE_TOKEN;
	}
	if (REMAIN < 1)
		tag = 0;
	else
		tag = *ptr++;

	/* [0] negState (ENUMERATED) */
	if (tag == CONTEXT) {
		tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
		if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
			return GSS_S_DEFECTIVE_TOKEN;
		if (g_get_tag_and_length(&ptr, ENUMERATED,
					 REMAIN, &len) < 0)
			return GSS_S_DEFECTIVE_TOKEN;
		if (len != ENUMERATION_LENGTH)
			return GSS_S_DEFECTIVE_TOKEN;
		if (REMAIN < 1)
			return GSS_S_DEFECTIVE_TOKEN;
		*negState = *ptr++;
		if (REMAIN < 1)
			tag = 0;
		else
			tag = *ptr++;
	}

	/* [1] supportedMech (MechType OID) */
	if (tag == (CONTEXT | 0x01)) {
		tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
		if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
			return GSS_S_DEFECTIVE_TOKEN;
		*supportedMech = get_mech_oid(minor_status, &ptr, REMAIN);
		if (*supportedMech == GSS_C_NO_OID)
			return GSS_S_DEFECTIVE_TOKEN;
		if (REMAIN < 1)
			tag = 0;
		else
			tag = *ptr++;
	}

	/* [2] responseToken (OCTET STRING) */
	if (tag == (CONTEXT | 0x02)) {
		tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
		if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
			return GSS_S_DEFECTIVE_TOKEN;
		*responseToken = get_input_token(&ptr, REMAIN);
		if (*responseToken == GSS_C_NO_BUFFER)
			return GSS_S_DEFECTIVE_TOKEN;
		if (REMAIN < 1)
			tag = 0;
		else
			tag = *ptr++;
	}

	/* [3] mechListMIC (OCTET STRING) */
	if (tag == (CONTEXT | 0x03)) {
		tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
		if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
			return GSS_S_DEFECTIVE_TOKEN;
		*mechListMIC = get_input_token(&ptr, REMAIN);
		if (*mechListMIC == GSS_C_NO_BUFFER)
			return GSS_S_DEFECTIVE_TOKEN;

		/* Handle Windows 2000 duplicate response token */
		if (*responseToken &&
		    ((*responseToken)->length == (*mechListMIC)->length) &&
		    !memcmp((*responseToken)->value, (*mechListMIC)->value,
			    (*responseToken)->length)) {
			OM_uint32 tmpmin;

			/* release frees the contents, free() the desc itself */
			gss_release_buffer(&tmpmin, *mechListMIC);
			free(*mechListMIC);
			*mechListMIC = NULL;
		}
	}
	return GSS_S_COMPLETE;
#undef REMAIN
}
/*
 * DER-encode negResult as a 3-byte ENUMERATED value into *buf_out,
 * advancing the output pointer past the encoding.  Returns 0 on
 * success, -1 if fewer than 3 bytes remain in the output buffer.
 */
static int
put_negResult(unsigned char **buf_out, OM_uint32 negResult,
	      unsigned int buflen)
{
	unsigned char *out = *buf_out;

	if (buflen < 3)
		return (-1);
	out[0] = ENUMERATED;
	out[1] = ENUMERATION_LENGTH;
	out[2] = (unsigned char)negResult;
	*buf_out = out + 3;
	return (0);
}
/*
 * Compare the received mechset against the mechset this server supports
 * and pick the first received mechanism the server can handle.  On a
 * match, *negResult is ACCEPT_INCOMPLETE if it was the initiator's first
 * choice, REQUEST_MIC otherwise, and a pointer aliasing into
 * received->elements is returned (do not free).  With no match,
 * *negResult is REJECT and NULL is returned.
 *
 * NOTE: There is currently no way to specify a preference order of
 * mechanisms supported by the acceptor.
 */
static gss_OID
negotiate_mech(gss_OID_set supported, gss_OID_set received,
	       OM_uint32 *negResult)
{
	size_t r, s;

	for (r = 0; r < received->count; r++) {
		gss_OID candidate = &received->elements[r];

		/* MS clients send a wrong krb5 OID; treat it as krb5. */
		if (g_OID_equal(candidate, &gss_mech_krb5_wrong_oid))
			candidate = (gss_OID)&gss_mech_krb5_oid;
		for (s = 0; s < supported->count; s++) {
			if (!g_OID_equal(candidate, &supported->elements[s]))
				continue;
			*negResult = (r == 0) ? ACCEPT_INCOMPLETE :
				REQUEST_MIC;
			return &received->elements[r];
		}
	}
	*negResult = REJECT;
	return (NULL);
}
/*
* the next two routines make a token buffer suitable for
* spnego_gss_display_status. These currently take the string
* in name and place it in the token. Eventually, if
* spnego_gss_display_status returns valid error messages,
* these routines will be changes to return the error string.
*/
static spnego_token_t
make_spnego_token(const char *name)
{
return (spnego_token_t)strdup(name);
}
/*
 * Build an error-message buffer from a C string.  The returned buffer's
 * value is a heap copy of name (including the trailing NUL, which is
 * counted in length).  An empty buffer (length 0, value NULL) is
 * returned when name is NULL *or* when the duplication fails -- the
 * original code set a nonzero length alongside a NULL value on strdup
 * failure, leaving an inconsistent buffer for callers to dereference.
 */
static gss_buffer_desc
make_err_msg(const char *name)
{
	gss_buffer_desc buffer;

	buffer.length = 0;
	buffer.value = NULL;
	if (name != NULL) {
		buffer.value = make_spnego_token(name);
		/* Only claim a length if the allocation succeeded. */
		if (buffer.value != NULL)
			buffer.length = strlen(name) + 1;
	}
	return (buffer);
}
/*
 * Create the client side spnego token passed back to gss_init_sec_context
 * and eventually up to the application program and over to the server.
 *
 * Use DER rules, definite length method per RFC 2478
 *
 * spnego_ctx    - context holding the pre-encoded mech list
 *                 (DER_mechTypes)
 * negHintsCompat- nonzero to emit MS negHints instead of a mechListMIC
 * mechListMIC   - optional [3] field payload (may be GSS_C_NO_BUFFER)
 * req_flags     - NOTE(review): currently unused by this routine;
 *                 reqFlags is not encoded into the token
 * data          - optional mechToken from gss_init_sec_context
 * sendtoken     - NOTE(review): unused here; kept for interface symmetry
 * outbuf        - receives the complete framed token (caller frees)
 *
 * Returns 0 on success, nonzero on encoding/allocation failure; on
 * failure outbuf is left empty.
 */
static int
make_spnego_tokenInit_msg(spnego_gss_ctx_id_t spnego_ctx,
			int negHintsCompat,
			gss_buffer_t mechListMIC, OM_uint32 req_flags,
			gss_buffer_t data, send_token_flag sendtoken,
			gss_buffer_t outbuf)
{
	int ret = 0;
	unsigned int tlen, dataLen = 0;
	unsigned int negTokenInitSize = 0;
	unsigned int negTokenInitSeqSize = 0;
	unsigned int negTokenInitContSize = 0;
	unsigned int rspTokenSize = 0;
	unsigned int mechListTokenSize = 0;
	unsigned int micTokenSize = 0;
	unsigned char *t;
	unsigned char *ptr;

	if (outbuf == GSS_C_NO_BUFFER)
		return (-1);

	outbuf->length = 0;
	outbuf->value = NULL;

	/* calculate the data length */

	/*
	 * Size of the [0] mechTypes field:
	 * 0xa0 [DER LEN] [mechTypes]
	 */
	mechListTokenSize = 1 +
		gssint_der_length_size(spnego_ctx->DER_mechTypes.length) +
		spnego_ctx->DER_mechTypes.length;
	dataLen += mechListTokenSize;

	/*
	 * If a token from gss_init_sec_context exists,
	 * add the length of the token + the ASN.1 overhead
	 */
	if (data != NULL) {
		/*
		 * Encoded in final output as:
		 * 0xa2 [DER LEN] 0x04 [DER LEN] [DATA]
		 * -----s--------|--------s2----------
		 */
		rspTokenSize = 1 +
			gssint_der_length_size(data->length) +
			data->length;
		dataLen += 1 + gssint_der_length_size(rspTokenSize) +
			rspTokenSize;
	}

	if (mechListMIC) {
		/*
		 * Encoded in final output as:
		 * 0xa3 [DER LEN] 0x04 [DER LEN] [DATA]
		 *	--s--	-----tlen------------
		 */
		micTokenSize = 1 +
			gssint_der_length_size(mechListMIC->length) +
			mechListMIC->length;
		dataLen += 1 +
			gssint_der_length_size(micTokenSize) +
			micTokenSize;
	}

	/*
	 * Add size of DER encoding
	 * [ SEQUENCE { MechTypeList | ReqFLags | Token | mechListMIC } ]
	 * 0x30 [DER_LEN] [data]
	 *
	 */
	negTokenInitContSize = dataLen;
	negTokenInitSeqSize = 1 + gssint_der_length_size(dataLen) + dataLen;
	dataLen = negTokenInitSeqSize;

	/*
	 * negTokenInitSize indicates the bytes needed to
	 * hold the ASN.1 encoding of the entire NegTokenInit
	 * SEQUENCE.
	 * 0xa0 [DER_LEN] + data
	 *
	 */
	negTokenInitSize = 1 +
		gssint_der_length_size(negTokenInitSeqSize) +
		negTokenInitSeqSize;

	tlen = g_token_size(gss_mech_spnego, negTokenInitSize);

	t = (unsigned char *) gssalloc_malloc(tlen);

	if (t == NULL) {
		return (-1);
	}

	ptr = t;

	/* create the message */
	if ((ret = g_make_token_header(gss_mech_spnego, negTokenInitSize,
			    &ptr, tlen)))
		goto errout;

	*ptr++ = CONTEXT; /* NegotiationToken identifier */
	/* NOTE(review): remaining space here is tlen minus the header
	 * already written; passing the full tlen is a loose upper bound. */
	if ((ret = gssint_put_der_length(negTokenInitSeqSize, &ptr, tlen)))
		goto errout;

	*ptr++ = SEQUENCE;
	if ((ret = gssint_put_der_length(negTokenInitContSize, &ptr,
					 tlen - (int)(ptr-t))))
		goto errout;

	*ptr++ = CONTEXT | 0x00; /* MechTypeList identifier */
	if ((ret = gssint_put_der_length(spnego_ctx->DER_mechTypes.length,
					 &ptr, tlen - (int)(ptr-t))))
		goto errout;

	/* We already encoded the MechSetList */
	(void) memcpy(ptr, spnego_ctx->DER_mechTypes.value,
		      spnego_ctx->DER_mechTypes.length);

	ptr += spnego_ctx->DER_mechTypes.length;

	/* [2] mechToken from the underlying mechanism, if any */
	if (data != NULL) {
		*ptr++ = CONTEXT | 0x02;
		if ((ret = gssint_put_der_length(rspTokenSize,
				&ptr, tlen - (int)(ptr - t))))
			goto errout;

		if ((ret = put_input_token(&ptr, data,
			tlen - (int)(ptr - t))))
			goto errout;
	}

	/* [3] mechListMIC (or MS negHints in compatibility mode) */
	if (mechListMIC != GSS_C_NO_BUFFER) {
		*ptr++ = CONTEXT | 0x03;
		if ((ret = gssint_put_der_length(micTokenSize,
				&ptr, tlen - (int)(ptr - t))))
			goto errout;

		if (negHintsCompat) {
			ret = put_neg_hints(&ptr, mechListMIC,
					    tlen - (int)(ptr - t));
			if (ret)
				goto errout;
		} else if ((ret = put_input_token(&ptr, mechListMIC,
				tlen - (int)(ptr - t))))
			goto errout;
	}

errout:
	if (ret != 0) {
		if (t)
			free(t);
		t = NULL;
		tlen = 0;
	}
	outbuf->length = tlen;
	outbuf->value = (void *) t;

	return (ret);
}
/*
 * create the server side spnego token passed back to
 * gss_accept_sec_context and eventually up to the application program
 * and over to the client.
 *
 * status      - negResult to encode (ACCEPT_COMPLETE etc.)
 * mech_wanted - negotiated mech; required when sendtoken is
 *               INIT_TOKEN_SEND (first reply carries supportedMech)
 * data        - optional responseToken payload
 * mechListMIC - optional mechListMIC payload
 * outbuf      - receives the encoded NegTokenTarg (caller frees)
 *
 * Returns GSS_S_COMPLETE or GSS_S_DEFECTIVE_TOKEN; on failure outbuf is
 * left empty.
 */
static int
make_spnego_tokenTarg_msg(OM_uint32 status, gss_OID mech_wanted,
			gss_buffer_t data, gss_buffer_t mechListMIC,
			send_token_flag sendtoken,
			gss_buffer_t outbuf)
{
	unsigned int tlen = 0;
	unsigned int ret = 0;
	unsigned int NegTokenTargSize = 0;
	unsigned int NegTokenSize = 0;
	unsigned int rspTokenSize = 0;
	unsigned int micTokenSize = 0;
	unsigned int dataLen = 0;
	unsigned char *t;
	unsigned char *ptr;

	if (outbuf == GSS_C_NO_BUFFER)
		return (GSS_S_DEFECTIVE_TOKEN);
	if (sendtoken == INIT_TOKEN_SEND && mech_wanted == GSS_C_NO_OID)
		return (GSS_S_DEFECTIVE_TOKEN);

	outbuf->length = 0;
	outbuf->value = NULL;

	/*
	 * ASN.1 encoding of the negResult
	 * ENUMERATED type is 3 bytes
	 *  ENUMERATED TAG, Length, Value,
	 * Plus 2 bytes for the CONTEXT id and length.
	 */
	dataLen = 5;

	/*
	 * calculate data length
	 *
	 * If this is the initial token, include length of
	 * mech_type and the negotiation result fields.
	 */
	if (sendtoken == INIT_TOKEN_SEND) {
		int mechlistTokenSize;
		/*
		 * 1 byte for the CONTEXT ID(0xa0),
		 * 1 byte for the OID ID(0x06)
		 * 1 byte for OID Length field
		 * Plus the rest... (OID Length, OID value)
		 */
		mechlistTokenSize = 3 + mech_wanted->length +
			gssint_der_length_size(mech_wanted->length);

		dataLen += mechlistTokenSize;
	}
	if (data != NULL && data->length > 0) {
		/* Length of the inner token */
		rspTokenSize = 1 + gssint_der_length_size(data->length) +
			data->length;

		dataLen += rspTokenSize;

		/* Length of the outer token */
		dataLen += 1 + gssint_der_length_size(rspTokenSize);
	}
	if (mechListMIC != NULL) {
		/* Length of the inner token */
		micTokenSize = 1 + gssint_der_length_size(mechListMIC->length) +
			mechListMIC->length;

		dataLen += micTokenSize;

		/* Length of the outer token */
		dataLen += 1 + gssint_der_length_size(micTokenSize);
	}
	/*
	 * Add size of DER encoded:
	 * NegTokenTarg [ SEQUENCE ] of
	 *    NegResult[0] ENUMERATED {
	 *	accept_completed(0),
	 *	accept_incomplete(1),
	 *	reject(2) }
	 *    supportedMech [1] MechType OPTIONAL,
	 *    responseToken [2] OCTET STRING OPTIONAL,
	 *    mechListMIC   [3] OCTET STRING OPTIONAL
	 *
	 * size = data->length + MechListMic + SupportedMech len +
	 *	Result Length + ASN.1 overhead
	 */
	NegTokenTargSize = dataLen;
	dataLen += 1 + gssint_der_length_size(NegTokenTargSize);

	/*
	 * NegotiationToken [ CHOICE ]{
	 *    negTokenInit  [0]	 NegTokenInit,
	 *    negTokenTarg  [1]	 NegTokenTarg }
	 */
	NegTokenSize = dataLen;
	dataLen += 1 + gssint_der_length_size(NegTokenSize);

	tlen = dataLen;
	t = (unsigned char *) gssalloc_malloc(tlen);

	if (t == NULL) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto errout;
	}

	ptr = t;

	/*
	 * Indicate that we are sending CHOICE 1
	 * (NegTokenTarg)
	 */
	*ptr++ = CONTEXT | 0x01;
	if (gssint_put_der_length(NegTokenSize, &ptr, dataLen) < 0) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto errout;
	}
	*ptr++ = SEQUENCE;
	if (gssint_put_der_length(NegTokenTargSize, &ptr,
				  tlen - (int)(ptr-t)) < 0) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto errout;
	}

	/*
	 * First field of the NegTokenTarg SEQUENCE
	 * is the ENUMERATED NegResult.
	 */
	*ptr++ = CONTEXT;
	if (gssint_put_der_length(3, &ptr,
				  tlen - (int)(ptr-t)) < 0) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto errout;
	}
	if (put_negResult(&ptr, status, tlen - (int)(ptr - t)) < 0) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto errout;
	}
	if (sendtoken == INIT_TOKEN_SEND) {
		/*
		 * Next, is the Supported MechType
		 */
		*ptr++ = CONTEXT | 0x01;
		if (gssint_put_der_length(mech_wanted->length + 2,
					  &ptr,
					  tlen - (int)(ptr - t)) < 0) {
			ret = GSS_S_DEFECTIVE_TOKEN;
			goto errout;
		}
		if (put_mech_oid(&ptr, mech_wanted,
				 tlen - (int)(ptr - t)) < 0) {
			ret = GSS_S_DEFECTIVE_TOKEN;
			goto errout;
		}
	}
	/* [2] responseToken */
	if (data != NULL && data->length > 0) {
		*ptr++ = CONTEXT | 0x02;
		if (gssint_put_der_length(rspTokenSize, &ptr,
					  tlen - (int)(ptr - t)) < 0) {
			ret = GSS_S_DEFECTIVE_TOKEN;
			goto errout;
		}
		if (put_input_token(&ptr, data,
				    tlen - (int)(ptr - t)) < 0) {
			ret = GSS_S_DEFECTIVE_TOKEN;
			goto errout;
		}
	}
	/* [3] mechListMIC */
	if (mechListMIC != NULL) {
		*ptr++ = CONTEXT | 0x03;
		if (gssint_put_der_length(micTokenSize, &ptr,
					  tlen - (int)(ptr - t)) < 0) {
			ret = GSS_S_DEFECTIVE_TOKEN;
			goto errout;
		}
		if (put_input_token(&ptr, mechListMIC,
				    tlen - (int)(ptr - t)) < 0) {
			ret = GSS_S_DEFECTIVE_TOKEN;
			goto errout;
		}
	}
	ret = GSS_S_COMPLETE;
errout:
	if (ret != GSS_S_COMPLETE) {
		if (t)
			free(t);
	} else {
		outbuf->length = ptr - t;
		outbuf->value = (void *) t;
	}

	return (ret);
}
/*
 * Compute the total size of a framed GSS token: the RFC 2743 outer
 * wrapper (0x60 + DER length), the embedded mech OID
 * (0x06 [len] OID), and body_size bytes of payload.
 */
static int
g_token_size(gss_OID_const mech, unsigned int body_size)
{
	int oid_part, total;

	/* 0x06 [MECHLENFIELD] MECHDATA */
	oid_part = 1 + gssint_der_length_size(mech->length) + mech->length;

	/* 0x60 [DER_LEN] <oid_part + body> */
	total = oid_part + 1 + gssint_der_length_size(body_size + oid_part);

	return total + body_size;
}
/*
 * generate token header.
 *
 * Use DER Definite Length method per RFC2478
 * Use of indefinite length encoding will not be compatible
 * with Microsoft or others that actually follow the spec.
 *
 * Writes 0x60 [len] 0x06 [oidlen] OID into *buf (advancing it) with
 * totallen bytes of space available.  Returns 0 on success or the
 * error from gssint_put_der_length().
 */
static int
g_make_token_header(gss_OID_const mech,
		    unsigned int body_size,
		    unsigned char **buf,
		    unsigned int totallen)
{
	int ret = 0;
	unsigned int hdrsize;
	unsigned char *p = *buf;

	hdrsize = 1 + gssint_der_length_size(mech->length) + mech->length;

	*(*buf)++ = HEADER_ID;
	if ((ret = gssint_put_der_length(hdrsize + body_size, buf, totallen)))
		return (ret);

	*(*buf)++ = MECH_OID;
	/*
	 * Remaining space is totallen minus bytes already consumed
	 * (*buf - p).  The previous code computed (p - *buf), a negative
	 * value, which *overstated* the remaining space and defeated the
	 * bounds check inside gssint_put_der_length().
	 */
	if ((ret = gssint_put_der_length(mech->length, buf,
					 totallen - (unsigned int)(*buf - p))))
		return (ret);
	TWRITE_STR(*buf, mech->elements, mech->length);
	return (0);
}
/*
 * NOTE: This checks that the length returned by
 * gssint_get_der_length() is not greater than the number of octets
 * remaining, even though gssint_get_der_length() already checks, in
 * theory.
 *
 * On success (return 0) *buf is advanced past the tag and length
 * octets and *outlen holds the content length.  On failure (return -1)
 * *outlen is always 0; previously a negative tmplen could be stored
 * into the unsigned *outlen, yielding a huge bogus length.
 */
static int
g_get_tag_and_length(unsigned char **buf, int tag,
		     unsigned int buflen, unsigned int *outlen)
{
	unsigned char *ptr = *buf;
	int ret = -1; /* pessimists, assume failure ! */
	unsigned int encoded_len;
	int tmplen = 0;

	*outlen = 0;
	/* need at least one tag octet and one length octet */
	if (buflen > 1 && *ptr == tag) {
		ptr++;
		tmplen = gssint_get_der_length(&ptr, buflen - 1,
					       &encoded_len);
		if (tmplen >= 0 &&
		    (unsigned int)tmplen <= buflen - (ptr - *buf))
			ret = 0;
	}

	/* only publish a length the caller may trust */
	if (ret == 0)
		*outlen = tmplen;
	*buf = ptr;
	return (ret);
}
/*
 * Verify the framing of a NegTokenInit and advance *buf_in to the DER
 * encoded mechType list.  Returns 0 on success or G_BAD_TOK_HEADER.
 */
static int
g_verify_neg_token_init(unsigned char **buf_in, unsigned int cur_size)
{
	unsigned char *buf = *buf_in;
	unsigned char *endptr = buf + cur_size;
	int seqsize;
	int ret = 0;
	unsigned int bytes;

	/*
	 * Verify this is a NegotiationToken type token
	 * - check for a0(context specific identifier)
	 * - get length and verify that enough data exists
	 */
	if (g_get_tag_and_length(&buf, CONTEXT, cur_size, &bytes) < 0)
		return (G_BAD_TOK_HEADER);

	cur_size = bytes; /* should indicate bytes remaining */

	/*
	 * Verify the next piece, it should identify this as
	 * a structure of type NegTokenInit.
	 *
	 * Check that at least one octet remains before dereferencing:
	 * the wrapper may legally claim a zero-length body, and the old
	 * unconditional *buf++ read past the end of the input.
	 */
	if (cur_size < 1 || *buf++ != SEQUENCE)
		return (G_BAD_TOK_HEADER);
	if ((seqsize = gssint_get_der_length(&buf, cur_size - 1, &bytes)) < 0)
		return (G_BAD_TOK_HEADER);
	/*
	 * Make sure we have the entire buffer as described
	 */
	if (seqsize > endptr - buf)
		return (G_BAD_TOK_HEADER);

	cur_size = seqsize; /* should indicate bytes remaining */

	/*
	 * Verify that the first blob is a sequence of mechTypes
	 * (same bounds check before the read as above).
	 */
	if (cur_size < 1 || *buf++ != CONTEXT)
		return (G_BAD_TOK_HEADER);
	if ((seqsize = gssint_get_der_length(&buf, cur_size - 1, &bytes)) < 0)
		return (G_BAD_TOK_HEADER);
	/*
	 * Make sure we have the entire buffer as described
	 */
	if (seqsize > endptr - buf)
		return (G_BAD_TOK_HEADER);

	/*
	 * At this point, *buf should be at the beginning of the
	 * DER encoded list of mech types that are to be negotiated.
	 */
	*buf_in = buf;

	return (ret);
}
/*
 * verify token header.
 *
 * Checks the RFC 2743 framing 0x60 [len] 0x06 [oidlen] OID of the
 * token in *buf_in/toksize against the expected mech OID.  On success
 * advances *buf_in past the header and stores the remaining body size
 * in *body_size.  Returns 0, G_BAD_TOK_HEADER, or G_WRONG_MECH.
 * NOTE(review): tok_type is accepted but not examined here.
 */
static int
g_verify_token_header(gss_OID_const mech,
		      unsigned int *body_size,
		      unsigned char **buf_in,
		      int tok_type,
		      unsigned int toksize)
{
	unsigned char *buf = *buf_in;
	int seqsize;
	gss_OID_desc toid;
	int ret = 0;
	unsigned int bytes;

	/* outer 0x60 application tag */
	if (toksize-- < 1)
		return (G_BAD_TOK_HEADER);
	if (*buf++ != HEADER_ID)
		return (G_BAD_TOK_HEADER);

	if ((seqsize = gssint_get_der_length(&buf, toksize, &bytes)) < 0)
		return (G_BAD_TOK_HEADER);

	/* declared length must exactly match the remaining input */
	if ((seqsize + bytes) != toksize)
		return (G_BAD_TOK_HEADER);

	/* mech OID: 0x06 tag, 1-byte length, then the OID octets */
	if (toksize-- < 1)
		return (G_BAD_TOK_HEADER);
	if (*buf++ != MECH_OID)
		return (G_BAD_TOK_HEADER);

	if (toksize-- < 1)
		return (G_BAD_TOK_HEADER);

	toid.length = *buf++;

	if (toksize < toid.length)
		return (G_BAD_TOK_HEADER);
	else
		toksize -= toid.length;

	toid.elements = buf;
	buf += toid.length;

	if (!g_OID_equal(&toid, mech))
		ret = G_WRONG_MECH;

	/*
	 * G_WRONG_MECH is not returned immediately because it's more important
	 * to return G_BAD_TOK_HEADER if the token header is in fact bad
	 */
	if (toksize < 2)
		return (G_BAD_TOK_HEADER);
	else
		toksize -= 2;

	/* only commit the outputs on a fully valid header */
	if (!ret) {
		*buf_in = buf;
		*body_size = toksize;
	}

	return (ret);
}
/*
 * Return non-zero if the oid is one of the kerberos mech oids,
 * otherwise return zero.
 *
 * N.B. There are 3 oids that represent the kerberos mech:
 *	RFC-specified GSS_MECH_KRB5_OID,
 *	Old pre-RFC   GSS_MECH_KRB5_OLD_OID,
 *	Incorrect MS  GSS_MECH_KRB5_WRONG_OID
 */
static int
is_kerb_mech(gss_OID oid)
{
	int present = 0;
	OM_uint32 minor;
	extern const gss_OID_set_desc * const gss_mech_set_krb5_both;

	(void) gss_test_oid_set_member(&minor, oid,
				       (gss_OID_set)gss_mech_set_krb5_both,
				       &present);
	return (present);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_2200_0 |
crossvul-cpp_data_good_3404_0 | /*
* DNxHD/VC-3 parser
* Copyright (c) 2008 Baptiste Coudurier <baptiste.coudurier@free.fr>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* DNxHD/VC-3 parser
*/
#include "parser.h"
#include "dnxhddata.h"
/* Per-stream parser state for reassembling DNxHD frames. */
typedef struct {
    ParseContext pc;
    int cur_byte;   /* offset within the current frame header being scanned */
    int remaining;  /* bytes still expected to complete the current frame */
    int w, h;       /* width/height fields read from the frame header */
} DNXHDParserContext;
/*
 * Scan buf for the end of the current DNxHD frame.
 *
 * Phase 1: slide a 64-bit window over the input until a DNxHD header
 * prefix is recognized.  Phase 2: once found, read the height, width
 * and compression ID fields at their fixed header offsets to learn the
 * total frame size, then count down the remaining bytes across calls.
 *
 * Returns the offset of the frame end within buf, 0 at EOF, or
 * END_NOT_FOUND if the frame continues past this buffer.
 */
static int dnxhd_find_frame_end(DNXHDParserContext *dctx,
                                const uint8_t *buf, int buf_size)
{
    ParseContext *pc = &dctx->pc;
    uint64_t state = pc->state64;
    int pic_found = pc->frame_start_found;
    int i = 0;

    if (!pic_found) {
        for (i = 0; i < buf_size; i++) {
            state = (state << 8) | buf[i];
            if (ff_dnxhd_check_header_prefix(state & 0xffffffffff00LL) != 0) {
                i++;
                pic_found = 1;
                dctx->cur_byte = 0;
                dctx->remaining = 0;
                break;
            }
        }
    }

    if (pic_found && !dctx->remaining) {
        if (!buf_size) /* EOF considered as end of frame */
            return 0;
        for (; i < buf_size; i++) {
            dctx->cur_byte++;
            state = (state << 8) | buf[i];

            /* header offsets 24/26 carry height/width (big-endian) */
            if (dctx->cur_byte == 24) {
                dctx->h = (state >> 32) & 0xFFFF;
            } else if (dctx->cur_byte == 26) {
                dctx->w = (state >> 32) & 0xFFFF;
            } else if (dctx->cur_byte == 42) {
                /* offset 42: compression ID determines the frame size */
                int cid = (state >> 32) & 0xFFFFFFFF;
                int remaining;
                if (cid <= 0)
                    continue;
                remaining = avpriv_dnxhd_get_frame_size(cid);
                if (remaining <= 0) {
                    /* not a fixed-size CID; try HR sizing from w/h */
                    remaining = ff_dnxhd_get_hr_frame_size(cid, dctx->w, dctx->h);
                    if (remaining <= 0)
                        continue;
                }
                dctx->remaining = remaining;
                /* +47 accounts for the header bytes consumed before i */
                if (buf_size - i + 47 >= dctx->remaining) {
                    int remaining = dctx->remaining;

                    pc->frame_start_found = 0;
                    pc->state64 = -1;
                    dctx->cur_byte = 0;
                    dctx->remaining = 0;
                    return remaining;
                } else {
                    dctx->remaining -= buf_size;
                }
            }
        }
    } else if (pic_found) {
        /* continuing a frame whose size is already known */
        if (dctx->remaining > buf_size) {
            dctx->remaining -= buf_size;
        } else {
            int remaining = dctx->remaining;

            pc->frame_start_found = 0;
            pc->state64 = -1;
            dctx->cur_byte = 0;
            dctx->remaining = 0;
            return remaining;
        }
    }
    pc->frame_start_found = pic_found;
    pc->state64 = state;
    return END_NOT_FOUND;
}
/*
 * Parser entry point: split the byte stream into complete DNxHD
 * frames.  With PARSER_FLAG_COMPLETE_FRAMES the input is already
 * framed and passed through untouched.
 */
static int dnxhd_parse(AVCodecParserContext *s,
                       AVCodecContext *avctx,
                       const uint8_t **poutbuf, int *poutbuf_size,
                       const uint8_t *buf, int buf_size)
{
    DNXHDParserContext *dctx = s->priv_data;
    ParseContext *pc = &dctx->pc;
    int next = buf_size;

    if (!(s->flags & PARSER_FLAG_COMPLETE_FRAMES)) {
        next = dnxhd_find_frame_end(dctx, buf, buf_size);
        if (ff_combine_frame(pc, next, &buf, &buf_size) < 0) {
            /* frame not complete yet: emit nothing, consume everything */
            *poutbuf      = NULL;
            *poutbuf_size = 0;
            return buf_size;
        }
    }

    *poutbuf      = buf;
    *poutbuf_size = buf_size;
    return next;
}
/* Parser registration: DNxHD/VC-3 elementary stream parser. */
AVCodecParser ff_dnxhd_parser = {
    .codec_ids      = { AV_CODEC_ID_DNXHD },
    .priv_data_size = sizeof(DNXHDParserContext),
    .parser_parse   = dnxhd_parse,
    .parser_close   = ff_parse_close,
};
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_3404_0 |
crossvul-cpp_data_bad_3060_8 | /* Key type used to cache DNS lookups made by the kernel
*
* See Documentation/networking/dns_resolver.txt
*
* Copyright (c) 2007 Igor Mammedov
* Author(s): Igor Mammedov (niallain@gmail.com)
* Steve French (sfrench@us.ibm.com)
* Wang Lei (wang840925@gmail.com)
* David Howells (dhowells@redhat.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/keyctl.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <keys/dns_resolver-type.h>
#include <keys/user-type.h>
#include "internal.h"
MODULE_DESCRIPTION("DNS Resolver");
MODULE_AUTHOR("Wang Lei");
MODULE_LICENSE("GPL");

/* Debug mask, adjustable at runtime through module parameter "debug". */
unsigned int dns_resolver_debug;
module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");

/* Override credentials whose thread keyring caches DNS lookup results. */
const struct cred *dns_resolver_cache;

/* Payload option that records a DNS error number as the key's result. */
#define DNS_ERRORNO_OPTION "dnserror"
/*
 * Preparse instantiation data for a dns_resolver key.
 *
 * The data must be a NUL-terminated string, with the NUL char accounted in
 * datalen.
 *
 * If the data contains a '#' characters, then we take the clause after each
 * one to be an option of the form 'key=value'.  The actual data of interest
 * is the string leading up to the first '#'.  For instance:
 *
 *        "ip1,ip2,...#foo=bar"
 *
 * Option values are copied into a bounded, NUL-terminated local buffer
 * before use.  The previous parser computed the option name length as
 * (end - opt) when no '=' was present -- spanning past the current
 * option -- and handed kstrtoul() a substring that was not
 * NUL-terminated at the value's end.
 */
static int
dns_resolver_preparse(struct key_preparsed_payload *prep)
{
	struct user_key_payload *upayload;
	unsigned long derrno;
	int ret;
	int datalen = prep->datalen, result_len = 0;
	const char *data = prep->data, *end, *opt;

	kenter("'%*.*s',%u", datalen, datalen, data, datalen);

	if (datalen <= 1 || !data || data[datalen - 1] != '\0')
		return -EINVAL;
	datalen--;

	/* deal with any options embedded in the data */
	end = data + datalen;
	opt = memchr(data, '#', datalen);
	if (!opt) {
		/* no options: the entire data is the result */
		kdebug("no options");
		result_len = datalen;
	} else {
		const char *next_opt;

		result_len = opt - data;
		opt++;
		kdebug("options: '%s'", opt);
		do {
			int opt_len, opt_nlen;
			const char *eq;
			char optval[128];

			next_opt = memchr(opt, '#', end - opt) ?: end;
			opt_len = next_opt - opt;
			if (opt_len <= 0 || opt_len > sizeof(optval) - 1) {
				printk(KERN_WARNING
				       "Invalid option length (%d) for dns_resolver key\n",
				       opt_len);
				return -EINVAL;
			}

			eq = memchr(opt, '=', opt_len);
			if (eq) {
				opt_nlen = eq - opt;
				eq++;
				/* bounded, NUL-terminated copy of the value */
				memcpy(optval, eq, next_opt - eq);
				optval[next_opt - eq] = '\0';
			} else {
				opt_nlen = opt_len;
				optval[0] = '\0';
			}

			kdebug("option '%*.*s' val '%s'",
			       opt_nlen, opt_nlen, opt, optval);

			/* see if it's an error number representing a DNS error
			 * that's to be recorded as the result in this key */
			if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
			    memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
				kdebug("dns error number option");

				ret = kstrtoul(optval, 10, &derrno);
				if (ret < 0)
					goto bad_option_value;

				if (derrno < 1 || derrno > 511)
					goto bad_option_value;

				kdebug("dns error no. = %lu", derrno);
				prep->type_data[0] = ERR_PTR(-derrno);
				continue;
			}

		bad_option_value:
			printk(KERN_WARNING
			       "Option '%*.*s' to dns_resolver key:"
			       " bad/missing value\n",
			       opt_nlen, opt_nlen, opt);
			return -EINVAL;
		} while (opt = next_opt + 1, opt < end);
	}

	/* don't cache the result if we're caching an error saying there's no
	 * result */
	if (prep->type_data[0]) {
		kleave(" = 0 [h_error %ld]", PTR_ERR(prep->type_data[0]));
		return 0;
	}

	kdebug("store result");
	prep->quotalen = result_len;
	upayload = kmalloc(sizeof(*upayload) + result_len + 1, GFP_KERNEL);
	if (!upayload) {
		kleave(" = -ENOMEM");
		return -ENOMEM;
	}

	upayload->datalen = result_len;
	memcpy(upayload->data, data, result_len);
	upayload->data[result_len] = '\0';

	prep->payload[0] = upayload;
	kleave(" = 0");
	return 0;
}
/*
 * Clean up the preparse data
 *
 * Releases the user_key_payload allocated by dns_resolver_preparse();
 * payload[0] may be NULL (error-only keys), which kfree() tolerates.
 */
static void dns_resolver_free_preparse(struct key_preparsed_payload *prep)
{
	pr_devel("==>%s()\n", __func__);

	kfree(prep->payload[0]);
}
/*
 * The description is of the form "[<type>:]<domain_name>"
 *
 * The domain name may be a simple name or an absolute domain name (which
 * should end with a period).  The domain name is case-independent.
 *
 * Returns 1 on a match (ignoring at most one trailing dot on either
 * side), 0 otherwise.
 */
static int
dns_resolver_match(const struct key *key,
		   const struct key_match_data *match_data)
{
	const char *src = key->description;
	const char *dsp = match_data->raw_data;
	int slen, dlen, ret = 0;

	kenter("%s,%s", src, dsp);

	if (src && dsp) {
		if (strcasecmp(src, dsp) == 0) {
			ret = 1;
		} else {
			slen = strlen(src);
			dlen = strlen(dsp);
			if (slen > 0 && dlen > 0) {
				/* a single trailing dot is not significant */
				if (src[slen - 1] == '.')
					slen--;
				if (dsp[dlen - 1] == '.')
					dlen--;
				if (slen == dlen &&
				    strncasecmp(src, dsp, slen) == 0)
					ret = 1;
			}
		}
	}

	kleave(" = %d", ret);
	return ret;
}
/*
 * Describe a DNS key
 *
 * Prints the description followed by either the cached error number or
 * the payload size, once the key has been instantiated.
 */
static void dns_resolver_describe(const struct key *key, struct seq_file *m)
{
	int err = key->type_data.x[0];

	seq_puts(m, key->description);
	if (!key_is_instantiated(key))
		return;
	if (err)
		seq_printf(m, ": %d", err);
	else
		seq_printf(m, ": %u", key->datalen);
}
/*
 * read the DNS data
 * - the key's semaphore is read-locked
 *
 * If the key caches a DNS error, that (negative) value is returned
 * instead of any payload data.
 */
static long dns_resolver_read(const struct key *key,
			      char __user *buffer, size_t buflen)
{
	long err = key->type_data.x[0];

	return err ? err : user_read(key, buffer, buflen);
}
/* Key type operations for kernel-cached DNS lookups. */
struct key_type key_type_dns_resolver = {
	.name		= "dns_resolver",
	.preparse	= dns_resolver_preparse,
	.free_preparse	= dns_resolver_free_preparse,
	.instantiate	= generic_key_instantiate,
	.match		= dns_resolver_match,
	.revoke		= user_revoke,
	.destroy	= user_destroy,
	.describe	= dns_resolver_describe,
	.read		= dns_resolver_read,
};
/*
 * Module init: build the credential set and keyring used to cache DNS
 * lookups, then register the key type.  Cleanup on failure unwinds in
 * reverse order via gotos.
 */
static int __init init_dns_resolver(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret;

	/* create an override credential set with a special thread keyring in
	 * which DNS requests are cached
	 *
	 * this is used to prevent malicious redirections from being installed
	 * with add_key().
	 */
	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;

	keyring = keyring_alloc(".dns_resolver",
				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
				KEY_USR_VIEW | KEY_USR_READ,
				KEY_ALLOC_NOT_IN_QUOTA, NULL);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}

	ret = register_key_type(&key_type_dns_resolver);
	if (ret < 0)
		goto failed_put_key;

	/* instruct request_key() to use this special keyring as a cache for
	 * the results it looks up */
	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	dns_resolver_cache = cred;

	kdebug("DNS resolver keyring: %d\n", key_serial(keyring));
	return 0;

failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}
/*
 * Module exit: revoke the cache keyring, unregister the key type, then
 * drop the credential reference -- this order must be preserved so no
 * new keys can be added while teardown is in progress.
 */
static void __exit exit_dns_resolver(void)
{
	key_revoke(dns_resolver_cache->thread_keyring);
	unregister_key_type(&key_type_dns_resolver);
	put_cred(dns_resolver_cache);
}
module_init(init_dns_resolver)
module_exit(exit_dns_resolver)
/* NOTE(review): MODULE_LICENSE("GPL") already appears near the top of
 * this file; this second occurrence looks redundant -- confirm before
 * removing. */
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3060_8 |
crossvul-cpp_data_bad_192_2 | /*
* The simplest mpeg encoder (well, it was the simplest!)
* Copyright (c) 2000,2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* The simplest mpeg encoder (well, it was the simplest!).
*/
#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/motion_vector.h"
#include "libavutil/timer.h"
#include "avcodec.h"
#include "blockdsp.h"
#include "h264chroma.h"
#include "idctdsp.h"
#include "internal.h"
#include "mathops.h"
#include "mpeg_er.h"
#include "mpegutils.h"
#include "mpegvideo.h"
#include "mpegvideodata.h"
#include "mjpegenc.h"
#include "msmpeg4.h"
#include "qpeldsp.h"
#include "thread.h"
#include "wmv2.h"
#include <limits.h>
/*
 * Dequantize an intra block per the MPEG-1 rules: DC uses the
 * dedicated DC scale, AC coefficients are scaled by qscale and the
 * intra matrix with mismatch control forcing odd magnitudes.
 */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    const uint16_t *quant_matrix = s->intra_matrix; /* XXX: only MPEG-1 */
    int last = s->block_last_index[n];
    int i;

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    for (i = 1; i <= last; i++) {
        int j = s->intra_scantable.permutated[i];
        int level = block[j];

        if (!level)
            continue;
        if (level < 0) {
            level = (int)(-level * qscale * quant_matrix[j]) >> 3;
            level = -((level - 1) | 1);
        } else {
            level = (int)(level * qscale * quant_matrix[j]) >> 3;
            level = (level - 1) | 1;
        }
        block[j] = level;
    }
}
/*
 * Dequantize an inter block per the MPEG-1 rules: coefficients are
 * reconstructed as ((2*level + 1) * qscale * matrix) >> 4 with
 * mismatch control forcing odd magnitudes.
 */
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    const uint16_t *quant_matrix = s->inter_matrix;
    int last = s->block_last_index[n];
    int i;

    for (i = 0; i <= last; i++) {
        int j = s->intra_scantable.permutated[i];
        int level = block[j];

        if (!level)
            continue;
        if (level < 0) {
            level = ((((-level) << 1) + 1) * qscale *
                     (int)quant_matrix[j]) >> 4;
            level = -((level - 1) | 1);
        } else {
            level = (((level << 1) + 1) * qscale *
                     (int)quant_matrix[j]) >> 4;
            level = (level - 1) | 1;
        }
        block[j] = level;
    }
}
/*
 * Dequantize an intra block per the MPEG-2 rules (no mismatch
 * control in this variant; see the _bitexact version for that).
 */
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                         int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    /* non-linear qscale table or plain doubling, per stream header */
    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    /* alternate scan always covers all 64 coefficients */
    if (s->alternate_scan) nCoeffs = 63;
    else                   nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
        }
    }
}
/*
 * Bit-exact MPEG-2 intra dequantization: identical to the plain
 * variant but applies the spec's mismatch control by toggling the LSB
 * of coefficient 63 so the coefficient sum is odd.
 */
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                                int16_t *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;
    int sum = -1;  /* running parity accumulator for mismatch control */

    if (s->q_scale_type) qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 qscale <<= 1;

    if (s->alternate_scan) nCoeffs = 63;
    else                   nCoeffs = s->block_last_index[n];

    block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
    sum += block[0];
    quant_matrix = s->intra_matrix;
    for (i = 1; i <= nCoeffs; i++) {
        int j = s->intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = -level;
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
                level = -level;
            } else {
                level = (int)(level * qscale * quant_matrix[j]) >> 4;
            }
            block[j] = level;
            sum += level;
        }
    }
    /* force odd coefficient sum (IEC 13818-2 mismatch control) */
    block[63] ^= sum & 1;
}
/**
 * Dequantize an MPEG-2 inter block in place.
 *
 * Reconstruction is ((2*|c| + 1) * qscale * weight) >> 5 with the sign
 * restored, followed by MPEG-2 mismatch control (LSB of coefficient 63
 * toggled so the coefficient sum is odd).
 */
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    const uint16_t *matrix;
    int idx, last;
    int parity_acc = -1; /* running coefficient sum for mismatch control */

    if (s->q_scale_type)
        qscale = ff_mpeg2_non_linear_qscale[qscale];
    else
        qscale <<= 1;

    last = s->alternate_scan ? 63 : s->block_last_index[n];

    matrix = s->inter_matrix;
    for (idx = 0; idx <= last; idx++) {
        const int pos = s->intra_scantable.permutated[idx];
        int coeff = block[pos];
        int mag;

        if (!coeff)
            continue;

        mag = coeff < 0 ? -coeff : coeff;
        mag = (((mag << 1) + 1) * qscale * (int) matrix[pos]) >> 5;
        coeff = coeff < 0 ? -mag : mag;
        block[pos] = coeff;
        parity_acc += coeff;
    }
    block[63] ^= parity_acc & 1;
}
/**
 * Dequantize an H.263 intra block in place.
 *
 * Each AC coefficient reconstructs as c*2*qscale +/- qadd depending on
 * sign.  Without advanced intra coding (AIC) the DC term is scaled by
 * the luma/chroma DC scale and qadd is the usual odd offset; with AIC
 * the DC is predicted elsewhere and qadd is 0.
 */
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int idx, last, qmul, qadd;

    av_assert2(s->block_last_index[n] >= 0 || s->h263_aic);

    qmul = qscale << 1;
    if (s->h263_aic) {
        /* DC handled by the AIC prediction path */
        qadd = 0;
    } else {
        block[0] *= n < 4 ? s->y_dc_scale : s->c_dc_scale;
        qadd = (qscale - 1) | 1;
    }

    /* with AC prediction the full block must be processed */
    if (s->ac_pred)
        last = 63;
    else
        last = s->intra_scantable.raster_end[s->block_last_index[n]];

    for (idx = 1; idx <= last; idx++) {
        const int coeff = block[idx];
        if (coeff)
            block[idx] = coeff < 0 ? coeff * qmul - qadd
                                   : coeff * qmul + qadd;
    }
}
/**
 * Dequantize an H.263 inter block in place: each nonzero coefficient
 * reconstructs as c*2*qscale +/- ((qscale-1)|1) depending on sign.
 */
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                   int16_t *block, int n, int qscale)
{
    int idx, last, qmul, qadd;

    av_assert2(s->block_last_index[n] >= 0);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;
    last = s->inter_scantable.raster_end[s->block_last_index[n]];

    for (idx = 0; idx <= last; idx++) {
        const int coeff = block[idx];
        if (coeff)
            block[idx] = coeff < 0 ? coeff * qmul - qadd
                                   : coeff * qmul + qadd;
    }
}
/* Fill an h-row, 16-pixel-wide area with mid-grey (0x80); used as a
 * stand-in for the motion-compensation routines under FF_DEBUG_NOMC.
 * The src argument exists only to match the hpeldsp function-pointer
 * signature and is ignored. */
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;
    for (row = h - 1; row >= 0; row--)
        memset(dst + row * linesize, 128, 16);
}
/* 8-pixel-wide counterpart of gray16(); see its comment. The src
 * argument only matches the hpeldsp function-pointer signature. */
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
{
    int row;
    for (row = h - 1; row >= 0; row--)
        memset(dst + row * linesize, 128, 8);
}
/* init common dct for both encoder and decoder */
/* Initialize DSP contexts and scalar dequantizers shared by encoder and
 * decoder.  The C dequantizer implementations are installed first; the
 * per-architecture init calls at the end may override them with
 * optimized versions, so the ordering here is deliberate. */
static av_cold int dct_init(MpegEncContext *s)
{
    ff_blockdsp_init(&s->bdsp, s->avctx);
    ff_h264chroma_init(&s->h264chroma, 8); //for lowres
    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
    ff_mpegvideodsp_init(&s->mdsp);
    ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        int i;
        /* Debug aid: replace all motion-compensation routines with ones
         * that paint mid-grey, so only the residual data is visible. */
        for (i=0; i<4; i++) {
            s->hdsp.avg_pixels_tab[0][i] = gray16;
            s->hdsp.put_pixels_tab[0][i] = gray16;
            s->hdsp.put_no_rnd_pixels_tab[0][i] = gray16;

            s->hdsp.avg_pixels_tab[1][i] = gray8;
            s->hdsp.put_pixels_tab[1][i] = gray8;
            s->hdsp.put_no_rnd_pixels_tab[1][i] = gray8;
        }
    }

    /* Default scalar dequantizers; the bitexact MPEG-2 intra variant is
     * selected when the caller asked for bit-exact output. */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if (s->avctx->flags & AV_CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* Architecture-specific overrides (compile-time dispatch) */
    if (HAVE_INTRINSICS_NEON)
        ff_mpv_common_init_neon(s);

    if (ARCH_ALPHA)
        ff_mpv_common_init_axp(s);
    if (ARCH_ARM)
        ff_mpv_common_init_arm(s);
    if (ARCH_PPC)
        ff_mpv_common_init_ppc(s);
    if (ARCH_X86)
        ff_mpv_common_init_x86(s);
    if (ARCH_MIPS)
        ff_mpv_common_init_mips(s);

    return 0;
}
/**
 * Initialize the IDCT context and build the permutated scan tables.
 * Inter and intra tables use the same base scan (vertical-alternate or
 * zigzag, depending on s->alternate_scan); only WMV variants ever use
 * different ones, which is handled elsewhere.
 */
av_cold void ff_mpv_idct_init(MpegEncContext *s)
{
    const uint8_t *base_scan;

    ff_idctdsp_init(&s->idsp, s->avctx);

    base_scan = s->alternate_scan ? ff_alternate_vertical_scan
                                  : ff_zigzag_direct;
    ff_init_scantable(s->idsp.idct_permutation, &s->inter_scantable, base_scan);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_scantable, base_scan);

    ff_init_scantable(s->idsp.idct_permutation, &s->intra_h_scantable,
                      ff_alternate_horizontal_scan);
    ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable,
                      ff_alternate_vertical_scan);
}
/* Thin wrapper around ff_alloc_picture() supplying the geometry from the
 * context.  Note the out-parameters: s->linesize and s->uvlinesize are
 * updated as a side effect of the allocation. */
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
{
    return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 0,
                            s->chroma_x_shift, s->chroma_y_shift, s->out_format,
                            s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
                            &s->linesize, &s->uvlinesize);
}
/* Allocate the per-slice-thread scratch state (ME maps, DCT blocks,
 * H.263 AC prediction values).  The frame-size-dependent scratch
 * buffers (edge_emu_buffer etc.) are only cleared here; they are
 * allocated lazily once the linesize is known.  On failure everything
 * allocated so far is released later through ff_mpv_common_end(). */
static int init_duplicate_context(MpegEncContext *s)
{
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    /* odd macroblock heights need one extra row in each plane */
    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    s->sc.edge_emu_buffer =
    s->me.scratchpad =
    s->me.temp =
    s->sc.rd_scratchpad =
    s->sc.b_scratchpad =
    s->sc.obmc_scratchpad = NULL;

    if (s->encoding) {
        /* motion-estimation maps are encoder-only */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map,
                          ME_MAP_SIZE * sizeof(uint32_t), fail)
        if (s->noise_reduction) {
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum,
                              2 * 64 * sizeof(int), fail)
        }
    }
    /* 12 blocks of 64 int16_t coefficients, double-buffered */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64 * 12 * 2 * sizeof(int16_t), fail)
    s->block = s->blocks[0];

    for (i = 0; i < 12; i++) {
        s->pblocks[i] = &s->block[i];
    }

    FF_ALLOCZ_OR_GOTO(s->avctx, s->block32, sizeof(*s->block32), fail)

    if (s->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        FFSWAP(void *, s->pblocks[4], s->pblocks[5]);
    }

    if (s->out_format == FMT_H263) {
        /* ac values: one base buffer, three plane pointers offset into it */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base,
                          yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through ff_mpv_common_end()
}
/* Release the per-slice-thread scratch state allocated by
 * init_duplicate_context().  me.temp and the sc scratchpads alias
 * regions inside me.scratchpad / edge_emu_buffer, so they are only
 * NULLed, never freed separately. */
static void free_duplicate_context(MpegEncContext *s)
{
    if (!s)
        return;

    av_freep(&s->sc.edge_emu_buffer);
    av_freep(&s->me.scratchpad);
    s->me.temp =
    s->sc.rd_scratchpad =
    s->sc.b_scratchpad =
    s->sc.obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->block32);
    av_freep(&s->ac_val_base);
    s->block = NULL;
}
/* Save the fields of src that belong to a slice-thread context into bak.
 * Used by ff_update_duplicate_context() to preserve a thread's private
 * pointers across the whole-struct memcpy from the main context.  This
 * COPY list must stay in sync with init/free_duplicate_context(). */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
{
#define COPY(a) bak->a = src->a
    COPY(sc.edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(sc.rd_scratchpad);
    COPY(sc.b_scratchpad);
    COPY(sc.obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(block32);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
/* Refresh a slice-thread context from the main context: copy the whole
 * struct, then restore the thread's private pointers (saved via
 * backup_duplicate_context) and re-derive the block pointers.  Scratch
 * buffers are (re)allocated lazily if they are missing.
 *
 * Returns 0 on success, a negative AVERROR on allocation failure. */
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
{
    MpegEncContext bak;
    int i, ret;
    // FIXME copy only needed parts
    // START_TIMER
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block array, not src's */
    for (i = 0; i < 12; i++) {
        dst->pblocks[i] = &dst->block[i];
    }
    if (dst->avctx->codec_tag == AV_RL32("VCR2")) {
        // exchange uv
        FFSWAP(void *, dst->pblocks[4], dst->pblocks[5]);
    }
    if (!dst->sc.edge_emu_buffer &&
        (ret = ff_mpeg_framesize_alloc(dst->avctx, &dst->me,
                                       &dst->sc, dst->linesize)) < 0) {
        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
               "scratch buffers.\n");
        return ret;
    }
    // STOP_TIMER("update_duplicate_context")
    // about 10k cycles / 0.01 sec for  1000frames on 1ghz with 2 threads
    return 0;
}
/* Synchronize the destination decoding thread's MpegEncContext with the
 * source thread's after the source finished its setup (frame-threading
 * update_thread_context callback).  Copies dimensions, picture
 * references, timing/bug-workaround state and the bitstream buffer, and
 * rebases the picture pointers into dst's own picture array.
 *
 * Returns 0 on success, a negative AVERROR on failure. */
int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                  const AVCodecContext *src)
{
    int i, ret;
    MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;

    if (dst == src)
        return 0;

    av_assert0(s != s1);

    // FIXME can parameters change on I-frames?
    // in that case dst may need a reinit
    if (!s->context_initialized) {
        int err;
        /* First update: clone the whole context, then fix up the fields
         * that must not be shared between threads. */
        memcpy(s, s1, sizeof(MpegEncContext));

        s->avctx                 = dst;
        s->bitstream_buffer      = NULL;
        s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;

        if (s1->context_initialized){
            //     s->picture_range_start  += MAX_PICTURE_COUNT;
            //     s->picture_range_end    += MAX_PICTURE_COUNT;
            ff_mpv_idct_init(s);
            if((err = ff_mpv_common_init(s)) < 0){
                /* leave dst in a clean zeroed state on failure */
                memset(s, 0, sizeof(MpegEncContext));
                s->avctx = dst;
                return err;
            }
        }
    }

    if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
        s->context_reinit = 0;
        s->height = s1->height;
        s->width  = s1->width;
        if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
            return ret;
    }

    s->avctx->coded_height  = s1->avctx->coded_height;
    s->avctx->coded_width   = s1->avctx->coded_width;
    s->avctx->width         = s1->avctx->width;
    s->avctx->height        = s1->avctx->height;

    s->coded_picture_number = s1->coded_picture_number;
    s->picture_number       = s1->picture_number;

    /* Re-reference every picture from the source thread's pool;
     * note the NULL checks guard partially initialized contexts. */
    av_assert0(!s->picture || s->picture != s1->picture);
    if(s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
            if (s1->picture && s1->picture[i].f->buf[0] &&
                (ret = ff_mpeg_ref_picture(s->avctx, &s->picture[i], &s1->picture[i])) < 0)
                return ret;
        }

#define UPDATE_PICTURE(pic)\
do {\
    ff_mpeg_unref_picture(s->avctx, &s->pic);\
    if (s1->pic.f && s1->pic.f->buf[0])\
        ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\
    else\
        ret = ff_update_picture_tables(&s->pic, &s1->pic);\
    if (ret < 0)\
        return ret;\
} while (0)

    UPDATE_PICTURE(current_picture);
    UPDATE_PICTURE(last_picture);
    UPDATE_PICTURE(next_picture);

/* Translate a pointer into s1's picture array to the corresponding slot
 * in the new context's array; NULL when it does not point there. */
#define REBASE_PICTURE(pic, new_ctx, old_ctx)             \
    ((pic && pic >= old_ctx->picture &&                   \
      pic < old_ctx->picture + MAX_PICTURE_COUNT) ?       \
        &new_ctx->picture[pic - old_ctx->picture] : NULL)

    s->last_picture_ptr    = REBASE_PICTURE(s1->last_picture_ptr,    s, s1);
    s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
    s->next_picture_ptr    = REBASE_PICTURE(s1->next_picture_ptr,    s, s1);

    // Error/bug resilience
    s->next_p_frame_damaged = s1->next_p_frame_damaged;
    s->workaround_bugs      = s1->workaround_bugs;
    s->padding_bug_score    = s1->padding_bug_score;

    // MPEG-4 timing info
    /* field-range memcpy: copies every field from last_time_base up to
     * and including pb_field_time — relies on struct layout */
    memcpy(&s->last_time_base, &s1->last_time_base,
           (char *) &s1->pb_field_time + sizeof(s1->pb_field_time) -
           (char *) &s1->last_time_base);

    // B-frame info
    s->max_b_frames = s1->max_b_frames;
    s->low_delay    = s1->low_delay;
    s->droppable    = s1->droppable;

    // DivX handling (doesn't work)
    s->divx_packed  = s1->divx_packed;

    if (s1->bitstream_buffer) {
        if (s1->bitstream_buffer_size +
            AV_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size) {
            av_fast_malloc(&s->bitstream_buffer,
                           &s->allocated_bitstream_buffer_size,
                           s1->allocated_bitstream_buffer_size);
            if (!s->bitstream_buffer) {
                s->bitstream_buffer_size = 0;
                return AVERROR(ENOMEM);
            }
        }
        s->bitstream_buffer_size = s1->bitstream_buffer_size;
        memcpy(s->bitstream_buffer, s1->bitstream_buffer,
               s1->bitstream_buffer_size);
        memset(s->bitstream_buffer + s->bitstream_buffer_size, 0,
               AV_INPUT_BUFFER_PADDING_SIZE);
    }

    // linesize-dependent scratch buffer allocation
    if (!s->sc.edge_emu_buffer)
        if (s1->linesize) {
            if (ff_mpeg_framesize_alloc(s->avctx, &s->me,
                                        &s->sc, s1->linesize) < 0) {
                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
                       "scratch buffers.\n");
                return AVERROR(ENOMEM);
            }
        } else {
            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
                   "be allocated due to unknown size.\n");
        }

    // MPEG-2/interlacing info
    /* another field-range memcpy: progressive_sequence .. rtp_mode */
    memcpy(&s->progressive_sequence, &s1->progressive_sequence,
           (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);

    if (!s1->first_field) {
        s->last_pict_type = s1->pict_type;
        if (s1->current_picture_ptr)
            s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f->quality;
    }

    return 0;
}
/**
* Set the given MpegEncContext to common defaults
* (same for encoding and decoding).
* The changed fields will not depend upon the
* prior state of the MpegEncContext.
*/
/**
 * Set the given MpegEncContext to common defaults
 * (same for encoding and decoding).
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
void ff_mpv_common_defaults(MpegEncContext *s)
{
    /* quantizer scale tables */
    s->y_dc_scale_table     = ff_mpeg1_dc_scale_table;
    s->c_dc_scale_table     = ff_mpeg1_dc_scale_table;
    s->chroma_qscale_table  = ff_default_chroma_qscale_table;

    /* assume progressive full frames until a header says otherwise */
    s->progressive_frame    = 1;
    s->progressive_sequence = 1;
    s->picture_structure    = PICT_FRAME;

    s->coded_picture_number = 0;
    s->picture_number       = 0;

    /* motion-vector range codes */
    s->f_code               = 1;
    s->b_code               = 1;

    s->slice_context_count  = 1;
}
/**
* Set the given MpegEncContext to defaults for decoding.
* the changed fields will not depend upon
* the prior state of the MpegEncContext.
*/
/* Decoder defaults are currently identical to the common ones; this
 * wrapper exists so decoder-only defaults can be added later without
 * touching every caller. */
void ff_mpv_decode_defaults(MpegEncContext *s)
{
    ff_mpv_common_defaults(s);
}
/**
 * Bind the context to an AVCodecContext and seed the fields derived
 * from it (coded dimensions, codec id, bug workarounds, fourcc).
 */
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
{
    s->avctx           = avctx;
    s->codec_id        = avctx->codec->id;
    s->workaround_bugs = avctx->workaround_bugs;

    s->width  = avctx->coded_width;
    s->height = avctx->coded_height;

    /* convert fourcc to upper case */
    s->codec_tag = avpriv_toupper4(avctx->codec_tag);
}
/**
* Initialize and allocates MpegEncContext fields dependent on the resolution.
*/
/**
 * Initialize and allocates MpegEncContext fields dependent on the resolution.
 *
 * Derives macroblock geometry from s->width (s->mb_height must already
 * be set by the caller) and allocates all per-frame tables: MV tables
 * (encoder only), interlaced direct-mode tables, H.263 cbp/prediction
 * tables and DC/intra bookkeeping.  On failure, partially allocated
 * tables are released later by free_context_frame()/ff_mpv_common_end().
 */
static int init_context_frame(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;

    s->mb_width   = (s->width + 15) / 16;
    s->mb_stride  = s->mb_width + 1;          /* +1 guard column */
    s->b8_stride  = s->mb_width * 2 + 1;

    mb_array_size = s->mb_height * s->mb_stride;
    mv_table_size = (s->mb_height + 2) * s->mb_stride + 1;

    /* set default edge pos, will be overridden
     * in decode_header if needed */
    s->h_edge_pos = s->mb_width * 16;
    s->v_edge_pos = s->mb_height * 16;

    s->mb_num     = s->mb_width * s->mb_height;

    s->block_wrap[0] =
    s->block_wrap[1] =
    s->block_wrap[2] =
    s->block_wrap[3] = s->b8_stride;
    s->block_wrap[4] =
    s->block_wrap[5] = s->mb_stride;

    y_size  = s->b8_stride * (2 * s->mb_height + 1);
    c_size  = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2   * c_size;

    if (s->mb_height & 1)
        yc_size += 2*s->b8_stride + 2*s->mb_stride;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num + 1) * sizeof(int),
                      fail); // error resilience code looks cleaner with this
    for (y = 0; y < s->mb_height; y++)
        for (x = 0; x < s->mb_width; x++)
            s->mb_index2xy[x + y * s->mb_width] = x + y * s->mb_stride;

    s->mb_index2xy[s->mb_height * s->mb_width] = (s->mb_height - 1) * s->mb_stride + s->mb_width; // FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables; the usable pointers are offset one row +
         * one column into the base allocations (guard border). */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base,            mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base,       mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base, mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base,     mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table            = s->p_mv_table_base + s->mb_stride + 1;
        s->b_forw_mv_table       = s->b_forw_mv_table_base + s->mb_stride + 1;
        s->b_back_mv_table       = s->b_back_mv_table_base + s->mb_stride + 1;
        s->b_bidir_forw_mv_table = s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table = s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table     = s->b_direct_mv_table_base + s->mb_stride + 1;

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type, mb_array_size * sizeof(uint16_t), fail) // needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        FF_ALLOC_OR_GOTO(s->avctx, s->cplx_tab,
                         mb_array_size * sizeof(float), fail);
        FF_ALLOC_OR_GOTO(s->avctx, s->bits_tab,
                         mb_array_size * sizeof(float), fail);
    }

    if (s->codec_id == AV_CODEC_ID_MPEG4 ||
        (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME)) {
        /* interlaced direct mode decoding tables */
        for (i = 0; i < 2; i++) {
            int j, k;
            for (j = 0; j < 2; j++) {
                for (k = 0; k < 2; k++) {
                    FF_ALLOCZ_OR_GOTO(s->avctx,
                                      s->b_field_mv_table_base[i][j][k],
                                      mv_table_size * 2 * sizeof(int16_t),
                                      fail);
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] +
                                                   s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j] + s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size + (s->mb_height&1)*2*s->b8_stride, fail);
        s->coded_block = s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail);
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail);
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        // MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        /* 1024 is the neutral DC prediction value */
        for (i = 0; i < yc_size; i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is an intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size + 2, fail);
    // Note the + 1 is for a quicker MPEG-4 slice_end detection

    return ff_mpeg_er_init(s);
fail:
    return AVERROR(ENOMEM);
}
/* Reset every pointer/table field of the context to a known-empty state
 * before initialization, so that a failure path can safely free
 * "everything".  This list must mirror the fields released in
 * free_duplicate_context(), free_context_frame() and
 * ff_mpv_common_end(). */
static void clear_context(MpegEncContext *s)
{
    int i, j, k;

    memset(&s->next_picture, 0, sizeof(s->next_picture));
    memset(&s->last_picture, 0, sizeof(s->last_picture));
    memset(&s->current_picture, 0, sizeof(s->current_picture));
    memset(&s->new_picture, 0, sizeof(s->new_picture));

    memset(s->thread_context, 0, sizeof(s->thread_context));

    s->me.map = NULL;
    s->me.score_map = NULL;
    s->dct_error_sum = NULL;
    s->block = NULL;
    s->blocks = NULL;
    s->block32 = NULL;
    memset(s->pblocks, 0, sizeof(s->pblocks));
    s->ac_val_base = NULL;
    s->ac_val[0] =
    s->ac_val[1] =
    s->ac_val[2] =NULL;
    s->sc.edge_emu_buffer = NULL;
    s->me.scratchpad = NULL;
    s->me.temp =
    s->sc.rd_scratchpad =
    s->sc.b_scratchpad =
    s->sc.obmc_scratchpad = NULL;

    s->bitstream_buffer = NULL;
    s->allocated_bitstream_buffer_size = 0;
    s->picture          = NULL;
    s->mb_type          = NULL;
    s->p_mv_table_base  = NULL;
    s->b_forw_mv_table_base = NULL;
    s->b_back_mv_table_base = NULL;
    s->b_bidir_forw_mv_table_base = NULL;
    s->b_bidir_back_mv_table_base = NULL;
    s->b_direct_mv_table_base = NULL;
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                s->b_field_mv_table_base[i][j][k] = NULL;
                s->b_field_mv_table[i][j][k] = NULL;
            }
            s->b_field_select_table[i][j] = NULL;
            s->p_field_mv_table_base[i][j] = NULL;
            s->p_field_mv_table[i][j] = NULL;
        }
        s->p_field_select_table[i] = NULL;
    }

    s->dc_val_base = NULL;
    s->coded_block_base = NULL;
    s->mbintra_table = NULL;
    s->cbp_table = NULL;
    s->pred_dir_table = NULL;

    s->mbskip_table = NULL;

    s->er.error_status_table = NULL;
    s->er.er_temp_buffer = NULL;
    s->mb_index2xy = NULL;
    s->lambda_table = NULL;

    s->cplx_tab = NULL;
    s->bits_tab = NULL;
}
/**
* init common structure for both encoder and decoder.
* this assumes that some variables like width/height are already set
*/
/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 *
 * Clears the context, derives mb_height (doubled granularity for
 * interlaced MPEG-2), clamps the slice count, allocates the picture
 * pool and per-frame tables, and sets up one context per slice thread.
 * On any failure everything is torn down via ff_mpv_common_end().
 */
av_cold int ff_mpv_common_init(MpegEncContext *s)
{
    int i, ret;
    int nb_slices = (HAVE_THREADS &&
                     s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                    s->avctx->thread_count : 1;

    clear_context(s);

    if (s->encoding && s->avctx->slices)
        nb_slices = s->avctx->slices;

    /* interlaced MPEG-2 needs mb_height rounded to field-pair units */
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if (s->avctx->pix_fmt == AV_PIX_FMT_NONE) {
        av_log(s->avctx, AV_LOG_ERROR,
               "decoding to AV_PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* cannot have more slice contexts than macroblock rows (or threads) */
    if (nb_slices > MAX_THREADS || (nb_slices > s->mb_height && s->mb_height)) {
        int max_slices;
        if (s->mb_height)
            max_slices = FFMIN(MAX_THREADS, s->mb_height);
        else
            max_slices = MAX_THREADS;
        av_log(s->avctx, AV_LOG_WARNING, "too many threads/slices (%d),"
               " reducing to %d\n", nb_slices, max_slices);
        nb_slices = max_slices;
    }

    if ((s->width || s->height) &&
        av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    dct_init(s);

    /* set chroma shifts */
    ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                           &s->chroma_x_shift,
                                           &s->chroma_y_shift);
    if (ret)
        return ret;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture,
                      MAX_PICTURE_COUNT * sizeof(Picture), fail);
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        s->picture[i].f = av_frame_alloc();
        if (!s->picture[i].f)
            goto fail;
    }
    s->next_picture.f = av_frame_alloc();
    if (!s->next_picture.f)
        goto fail;
    s->last_picture.f = av_frame_alloc();
    if (!s->last_picture.f)
        goto fail;
    s->current_picture.f = av_frame_alloc();
    if (!s->current_picture.f)
        goto fail;
    s->new_picture.f = av_frame_alloc();
    if (!s->new_picture.f)
        goto fail;

    if (init_context_frame(s))
        goto fail;

    s->parse_context.state = -1;

    s->context_initialized = 1;
    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0]   = s;

//     if (s->width && s->height) {
    if (nb_slices > 1) {
        /* one duplicated context per slice thread, each covering a
         * contiguous band of macroblock rows */
        for (i = 0; i < nb_slices; i++) {
            if (i) {
                s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
                if (!s->thread_context[i])
                    goto fail;
            }
            if (init_duplicate_context(s->thread_context[i]) < 0)
                goto fail;
            s->thread_context[i]->start_mb_y =
                (s->mb_height * (i) + nb_slices / 2) / nb_slices;
            s->thread_context[i]->end_mb_y   =
                (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
        }
    } else {
        if (init_duplicate_context(s) < 0)
            goto fail;
        s->start_mb_y = 0;
        s->end_mb_y   = s->mb_height;
    }
    s->slice_context_count = nb_slices;
//     }

    return 0;
 fail:
    ff_mpv_common_end(s);
    return -1;
}
/**
* Frees and resets MpegEncContext fields depending on the resolution.
* Is used during resolution changes to avoid a full reinitialization of the
* codec.
*/
/**
 * Frees and resets MpegEncContext fields depending on the resolution.
 * Is used during resolution changes to avoid a full reinitialization of the
 * codec.
 *
 * Inverse of init_context_frame(); must release exactly the tables that
 * function allocates (derived pointers like p_mv_table are only NULLed
 * since they alias the *_base allocations).
 */
static void free_context_frame(MpegEncContext *s)
{
    int i, j, k;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table            = NULL;
    s->b_forw_mv_table       = NULL;
    s->b_back_mv_table       = NULL;
    s->b_bidir_forw_mv_table = NULL;
    s->b_bidir_back_mv_table = NULL;
    s->b_direct_mv_table     = NULL;
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 2; j++) {
            for (k = 0; k < 2; k++) {
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k] = NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j] = NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);

    av_freep(&s->er.error_status_table);
    av_freep(&s->er.er_temp_buffer);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);

    av_freep(&s->cplx_tab);
    av_freep(&s->bits_tab);

    /* linesizes become invalid with the tables; force re-derivation */
    s->linesize = s->uvlinesize = 0;
}
/* Reinitialize the resolution-dependent parts of an already-initialized
 * context (called on mid-stream dimension changes): tear down the slice
 * contexts and frame tables, mark all pooled pictures for reallocation,
 * then rebuild everything for the new s->width/s->height.
 *
 * Returns 0 on success; on failure the whole context is closed via
 * ff_mpv_common_end() and a negative AVERROR is returned. */
int ff_mpv_common_frame_size_change(MpegEncContext *s)
{
    int i, err = 0;

    if (!s->context_initialized)
        return AVERROR(EINVAL);

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
    } else
        free_duplicate_context(s);

    free_context_frame(s);

    /* pictures keep their buffers but must be resized on next use */
    if (s->picture)
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            s->picture[i].needs_realloc = 1;
        }

    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;

    // init
    if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else
        s->mb_height = (s->height + 15) / 16;

    if ((s->width || s->height) &&
        (err = av_image_check_size(s->width, s->height, 0, s->avctx)) < 0)
        goto fail;

    if ((err = init_context_frame(s)))
        goto fail;

    memset(s->thread_context, 0, sizeof(s->thread_context));
    s->thread_context[0]   = s;

    if (s->width && s->height) {
        int nb_slices = s->slice_context_count;
        if (nb_slices > 1) {
            for (i = 0; i < nb_slices; i++) {
                if (i) {
                    s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
                    if (!s->thread_context[i]) {
                        err = AVERROR(ENOMEM);
                        goto fail;
                    }
                }
                if ((err = init_duplicate_context(s->thread_context[i])) < 0)
                    goto fail;
                s->thread_context[i]->start_mb_y =
                    (s->mb_height * (i) + nb_slices / 2) / nb_slices;
                s->thread_context[i]->end_mb_y   =
                    (s->mb_height * (i + 1) + nb_slices / 2) / nb_slices;
            }
        } else {
            err = init_duplicate_context(s);
            if (err < 0)
                goto fail;
            s->start_mb_y = 0;
            s->end_mb_y   = s->mb_height;
        }
        s->slice_context_count = nb_slices;
    }

    return 0;
 fail:
    ff_mpv_common_end(s);
    return err;
}
/* init common structure for both encoder and decoder */
/* init common structure for both encoder and decoder */
/* Full teardown of an MpegEncContext: slice-thread contexts, parse and
 * bitstream buffers, the whole picture pool and the per-frame tables.
 * Safe to call on a partially initialized context (NULL-tolerant),
 * which is why the failure paths above funnel through here. */
void ff_mpv_common_end(MpegEncContext *s)
{
    int i;

    if (!s)
        return ;

    if (s->slice_context_count > 1) {
        for (i = 0; i < s->slice_context_count; i++) {
            free_duplicate_context(s->thread_context[i]);
        }
        for (i = 1; i < s->slice_context_count; i++) {
            av_freep(&s->thread_context[i]);
        }
        s->slice_context_count = 1;
    } else free_duplicate_context(s);

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size = 0;

    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size = 0;

    if (s->picture) {
        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
            ff_free_picture_tables(&s->picture[i]);
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
            av_frame_free(&s->picture[i].f);
        }
    }
    av_freep(&s->picture);
    ff_free_picture_tables(&s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    av_frame_free(&s->last_picture.f);
    ff_free_picture_tables(&s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    av_frame_free(&s->current_picture.f);
    ff_free_picture_tables(&s->next_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);
    av_frame_free(&s->next_picture.f);
    ff_free_picture_tables(&s->new_picture);
    ff_mpeg_unref_picture(s->avctx, &s->new_picture);
    av_frame_free(&s->new_picture.f);

    free_context_frame(s);

    s->context_initialized      = 0;
    s->last_picture_ptr         =
    s->next_picture_ptr         =
    s->current_picture_ptr      = NULL;
    s->linesize = s->uvlinesize = 0;
}
/* Overwrite a frame with uniform mid-grey: luma plane to 0x80 and both
 * chroma planes to 0x80 (neutral chroma), honouring chroma subsampling.
 * Used by the FF_DEBUG_NOMC debug path. */
static void gray_frame(AVFrame *frame)
{
    int hshift, vshift, y;
    int cwidth, cheight;

    av_pix_fmt_get_chroma_sub_sample(frame->format, &hshift, &vshift);
    cwidth  = AV_CEIL_RSHIFT(frame->width,  hshift);
    cheight = AV_CEIL_RSHIFT(frame->height, vshift);

    for (y = 0; y < frame->height; y++)
        memset(frame->data[0] + frame->linesize[0] * y, 0x80, frame->width);

    for (y = 0; y < cheight; y++) {
        memset(frame->data[1] + frame->linesize[1] * y, 0x80, cwidth);
        memset(frame->data[2] + frame->linesize[2] * y, 0x80, cwidth);
    }
}
/**
* generic function called after decoding
* the header and before a frame is decoded.
*/
/**
 * generic function called after decoding
 * the header and before a frame is decoded.
 *
 * Releases stale references, picks/allocates the current picture,
 * fabricates grey dummy reference frames when references are missing
 * (broken streams / stream copy starts), references last/next pictures,
 * fixes up field-picture line sizes and selects the dequantizer.
 *
 * @return 0 on success, a negative value on failure.
 */
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i, ret;
    Picture *pic;
    s->mb_skipped = 0;

    if (!ff_thread_can_start_frame(avctx)) {
        av_log(avctx, AV_LOG_ERROR, "Attempt to start a frame outside SETUP state\n");
        return -1;
    }

    /* mark & release old frames */
    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
        s->last_picture_ptr != s->next_picture_ptr &&
        s->last_picture_ptr->f->buf[0]) {
        ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
    }

    /* release forgotten pictures */
    /* if (MPEG-124 / H.263) */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (&s->picture[i] != s->last_picture_ptr &&
            &s->picture[i] != s->next_picture_ptr &&
            s->picture[i].reference && !s->picture[i].needs_realloc) {
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
        }
    }

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);

    /* release non reference frames */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        if (!s->picture[i].reference)
            ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
    }

    if (s->current_picture_ptr && !s->current_picture_ptr->f->buf[0]) {
        // we already have an unused image
        // (maybe it was set before reading the header)
        pic = s->current_picture_ptr;
    } else {
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        pic = &s->picture[i];
    }

    pic->reference = 0;
    if (!s->droppable) {
        if (s->pict_type != AV_PICTURE_TYPE_B)
            pic->reference = 3;
    }

    pic->f->coded_picture_number = s->coded_picture_number++;

    if (alloc_picture(s, pic, 0) < 0)
        return -1;

    s->current_picture_ptr = pic;
    // FIXME use only the vars from current_pic
    s->current_picture_ptr->f->top_field_first = s->top_field_first;
    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        if (s->picture_structure != PICT_FRAME)
            s->current_picture_ptr->f->top_field_first =
                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
    }
    s->current_picture_ptr->f->interlaced_frame = !s->progressive_frame &&
                                                 !s->progressive_sequence;
    s->current_picture_ptr->field_picture      =  s->picture_structure != PICT_FRAME;

    s->current_picture_ptr->f->pict_type = s->pict_type;
    // if (s->avctx->flags && AV_CODEC_FLAG_QSCALE)
    //     s->current_picture_ptr->quality = s->new_picture_ptr->quality;
    s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;

    if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
                                   s->current_picture_ptr)) < 0)
        return ret;

    if (s->pict_type != AV_PICTURE_TYPE_B) {
        s->last_picture_ptr = s->next_picture_ptr;
        if (!s->droppable)
            s->next_picture_ptr = s->current_picture_ptr;
    }
    ff_dlog(s->avctx, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
            s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
            s->last_picture_ptr    ? s->last_picture_ptr->f->data[0]    : NULL,
            s->next_picture_ptr    ? s->next_picture_ptr->f->data[0]    : NULL,
            s->current_picture_ptr ? s->current_picture_ptr->f->data[0] : NULL,
            s->pict_type, s->droppable);

    if ((!s->last_picture_ptr || !s->last_picture_ptr->f->buf[0]) &&
        (s->pict_type != AV_PICTURE_TYPE_I)) {
        int h_chroma_shift, v_chroma_shift;
        av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
                                         &h_chroma_shift, &v_chroma_shift);
        if (s->pict_type == AV_PICTURE_TYPE_B && s->next_picture_ptr && s->next_picture_ptr->f->buf[0])
            av_log(avctx, AV_LOG_DEBUG,
                   "allocating dummy last picture for B frame\n");
        else if (s->pict_type != AV_PICTURE_TYPE_I)
            av_log(avctx, AV_LOG_ERROR,
                   "warning: first frame is no keyframe\n");

        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->last_picture_ptr = &s->picture[i];

        s->last_picture_ptr->reference   = 3;
        s->last_picture_ptr->f->key_frame = 0;
        s->last_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->last_picture_ptr, 0) < 0) {
            s->last_picture_ptr = NULL;
            return -1;
        }

        if (!avctx->hwaccel) {
            /* fill the dummy reference with mid-grey (neutral chroma) */
            for(i=0; i<avctx->height; i++)
                memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
                       0x80, avctx->width);
            if (s->last_picture_ptr->f->data[2]) {
                for(i=0; i<AV_CEIL_RSHIFT(avctx->height, v_chroma_shift); i++) {
                    memset(s->last_picture_ptr->f->data[1] + s->last_picture_ptr->f->linesize[1]*i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                    memset(s->last_picture_ptr->f->data[2] + s->last_picture_ptr->f->linesize[2]*i,
                           0x80, AV_CEIL_RSHIFT(avctx->width, h_chroma_shift));
                }
            }

            if(s->codec_id == AV_CODEC_ID_FLV1 || s->codec_id == AV_CODEC_ID_H263){
                for(i=0; i<avctx->height; i++)
                    memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 16, avctx->width);
            }
        }

        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->last_picture_ptr->tf, INT_MAX, 1);
    }
    if ((!s->next_picture_ptr || !s->next_picture_ptr->f->buf[0]) &&
        s->pict_type == AV_PICTURE_TYPE_B) {
        /* Allocate a dummy frame */
        i = ff_find_unused_picture(s->avctx, s->picture, 0);
        if (i < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
            return i;
        }
        s->next_picture_ptr = &s->picture[i];

        s->next_picture_ptr->reference   = 3;
        s->next_picture_ptr->f->key_frame = 0;
        s->next_picture_ptr->f->pict_type = AV_PICTURE_TYPE_P;

        if (alloc_picture(s, s->next_picture_ptr, 0) < 0) {
            s->next_picture_ptr = NULL;
            return -1;
        }
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 0);
        ff_thread_report_progress(&s->next_picture_ptr->tf, INT_MAX, 1);
    }

#if 0 // BUFREF-FIXME
    memset(s->last_picture.f->data, 0, sizeof(s->last_picture.f->data));
    memset(s->next_picture.f->data, 0, sizeof(s->next_picture.f->data));
#endif
    if (s->last_picture_ptr) {
        if (s->last_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
                                       s->last_picture_ptr)) < 0)
            return ret;
    }
    if (s->next_picture_ptr) {
        if (s->next_picture_ptr->f->buf[0] &&
            (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
                                       s->next_picture_ptr)) < 0)
            return ret;
    }

    av_assert0(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr &&
                                                 s->last_picture_ptr->f->buf[0]));

    if (s->picture_structure!= PICT_FRAME) {
        /* field pictures address every second line; for the bottom field
         * start one line down */
        int i;
        for (i = 0; i < 4; i++) {
            if (s->picture_structure == PICT_BOTTOM_FIELD) {
                s->current_picture.f->data[i] +=
                    s->current_picture.f->linesize[i];
            }
            s->current_picture.f->linesize[i] *= 2;
            s->last_picture.f->linesize[i]    *= 2;
            s->next_picture.f->linesize[i]    *= 2;
        }
    }

    /* set dequantizer, we can't do it during init as
     * it might change for MPEG-4 and we can't do it in the header
     * decode as init is not called for MPEG-4 there yet */
    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    } else {
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if (s->avctx->debug & FF_DEBUG_NOMC) {
        gray_frame(s->current_picture_ptr->f);
    }

    return 0;
}
/* Finalize decoder state once a frame has been fully decoded. */
void ff_mpv_frame_end(MpegEncContext *s)
{
    emms_c();

    if (!s->current_picture.reference)
        return;

    /* Reference frames are published as fully decoded for frame threading. */
    ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
}
/* Thin wrapper: forward per-picture debug tables to ff_print_debug_info2(). */
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
{
    AVCodecContext *const avctx = s->avctx;

    ff_print_debug_info2(avctx, pict, s->mbskip_table,
                         p->mb_type, p->qscale_table, p->motion_val,
                         &s->low_delay,
                         s->mb_width, s->mb_height, s->mb_stride,
                         s->quarter_sample);
}
/* Export the per-macroblock qscale table of picture p on frame f.
 * Returns 0 on success or a negative AVERROR code. */
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
{
    const int skip = 2 * s->mb_stride + 1;  /* leading rows/cols before the first visible MB */
    AVBufferRef *qp_ref = av_buffer_ref(p->qscale_table_buf);

    if (!qp_ref)
        return AVERROR(ENOMEM);

    av_assert0(qp_ref->size >= skip + s->mb_stride * ((f->height + 15) / 16));

    /* Advance past the skipped region so the table starts at the first MB row. */
    qp_ref->data += skip;
    qp_ref->size -= skip;

    return av_frame_set_qp_table(f, qp_ref, s->mb_stride, qp_type);
}
/**
 * Half-pel motion compensation for a single luma block at lowres resolution.
 *
 * Copies (or interpolates) a w x h block from src into dest using the
 * h264 chroma MC function table, emulating picture edges when the source
 * block reaches outside the valid area.
 *
 * @return 1 if the edge-emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest, uint8_t *src,
                                     int field_based, int field_select,
                                     int src_x, int src_y,
                                     int width, int height, ptrdiff_t stride,
                                     int h_edge_pos, int v_edge_pos,
                                     int w, int h, h264_chroma_mc_func *pix_op,
                                     int motion_x, int motion_y)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 3);
    /* mask extracting the sub-pel fraction at this lowres level */
    const int s_mask   = (2 << lowres) - 1;
    int emu = 0;
    int sx, sy;

    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* NOTE: '+' binds tighter than '>>', so this is motion_x >> (lowres + 1) */
    src_x += motion_x >> lowres + 1;
    src_y += motion_y >> lowres + 1;

    src += src_y * stride + src_x;

    /* Spill to the edge-emulation buffer when the block touches the border. */
    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
                                 s->linesize, s->linesize,
                                 w + 1, (h + 1) << field_based,
                                 src_x, src_y << field_based,
                                 h_edge_pos, v_edge_pos);
        src = s->sc.edge_emu_buffer;
        emu = 1;
    }

    /* Rescale the sub-pel fraction to the 1/8-pel units expected by pix_op. */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
/* Apply one MPEG motion vector to the three components (Y, Cb, Cr)
 * at lowres resolution.  Chroma source positions are derived from the
 * luma vector according to the output format's chroma subsampling. */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                                                uint8_t *dest_y,
                                                uint8_t *dest_cb,
                                                uint8_t *dest_cr,
                                                int field_based,
                                                int bottom_field,
                                                int field_select,
                                                uint8_t **ref_picture,
                                                h264_chroma_mc_func *pix_op,
                                                int motion_x, int motion_y,
                                                int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
    ptrdiff_t uvlinesize, linesize;
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 3);
    const int block_s    = 8>>lowres;                 /* block size at this lowres level */
    const int s_mask     = (2 << lowres) - 1;         /* sub-pel fraction mask */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.f->linesize[0] << field_based;
    uvlinesize = s->current_picture.f->linesize[1] << field_based;

    // FIXME obviously not perfect but qpel will not work in lowres anyway
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    if(field_based){
        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
    }

    sx = motion_x & s_mask;
    sy = motion_y & s_mask;
    /* NOTE: '+' binds tighter than '>>': these are shifts by (lowres + 1) */
    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);

    if (s->out_format == FMT_H263) {
        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
        uvsrc_x = src_x >> 1;
        uvsrc_y = src_y >> 1;
    } else if (s->out_format == FMT_H261) {
        // even chroma mv's are full pel in H261
        mx      = motion_x / 4;
        my      = motion_y / 4;
        uvsx    = (2 * mx) & s_mask;
        uvsy    = (2 * my) & s_mask;
        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
        uvsrc_y = mb_y * block_s + (my >> lowres);
    } else {
        if(s->chroma_y_shift){
            /* Chroma 4:2:0: halve the vector in both directions. */
            mx      = motion_x / 2;
            my      = motion_y / 2;
            uvsx    = mx & s_mask;
            uvsy    = my & s_mask;
            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
        } else {
            if(s->chroma_x_shift){
                //Chroma422: halve only horizontally
                mx = motion_x / 2;
                uvsx = mx & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_y = src_y;
                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
            } else {
                //Chroma444: chroma uses the luma vector unchanged
                uvsx = motion_x & s_mask;
                uvsy = motion_y & s_mask;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* Fall back to edge emulation whenever the luma block (or a negative
     * chroma row) reaches outside the decoded picture area. */
    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
                                 linesize >> field_based, linesize >> field_based,
                                 17, 17 + field_based,
                                 src_x, src_y << field_based, h_edge_pos,
                                 v_edge_pos);
        ptr_y = s->sc.edge_emu_buffer;
        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
            uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
            if (s->workaround_bugs & FF_BUG_IEDGE)
                vbuf -= s->uvlinesize;
            s->vdsp.emulated_edge_mc(ubuf, ptr_cb,
                                     uvlinesize >> field_based, uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            s->vdsp.emulated_edge_mc(vbuf, ptr_cr,
                                     uvlinesize >> field_based,uvlinesize >> field_based,
                                     9, 9 + field_based,
                                     uvsrc_x, uvsrc_y << field_based,
                                     h_edge_pos >> 1, v_edge_pos >> 1);
            ptr_cb = ubuf;
            ptr_cr = vbuf;
        }
    }

    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
    if (bottom_field) {
        dest_y  += s->linesize;
        dest_cb += s->uvlinesize;
        dest_cr += s->uvlinesize;
    }

    if (field_select) {
        ptr_y   += s->linesize;
        ptr_cb  += s->uvlinesize;
        ptr_cr  += s->uvlinesize;
    }

    /* Rescale sub-pel fractions to the 1/8-pel units expected by pix_op. */
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);

    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
        uvsx = (uvsx << 2) >> lowres;
        uvsy = (uvsy << 2) >> lowres;
        if (hc) {
            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
        }
    }
    // FIXME h261 lowres loop filter
}
/* Chroma motion compensation for the 4MV (8x8) case at lowres resolution.
 * The four luma vectors have been summed by the caller; a single chroma
 * vector is derived with the H.263 rounding rule. */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                            uint8_t *dest_cb, uint8_t *dest_cr,
                                            uint8_t **ref_picture,
                                            h264_chroma_mc_func * pix_op,
                                            int mx, int my)
{
    const int lowres     = s->avctx->lowres;
    const int op_index   = FFMIN(lowres, 3);
    const int block_s    = 8 >> lowres;
    const int s_mask     = (2 << lowres) - 1;
    /* NOTE: '+' binds tighter than '>>': shift by (lowres + 1) */
    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
    int emu = 0, src_x, src_y, sx, sy;
    ptrdiff_t offset;
    uint8_t *ptr;

    if (s->quarter_sample) {
        mx /= 2;
        my /= 2;
    }

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx = ff_h263_round_chroma(mx);
    my = ff_h263_round_chroma(my);

    sx = mx & s_mask;
    sy = my & s_mask;
    src_x = s->mb_x * block_s + (mx >> lowres + 1);
    src_y = s->mb_y * block_s + (my >> lowres + 1);
    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    /* Edge emulation for the Cb plane when the block touches the border. */
    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
        emu = 1;
    }
    sx = (sx << 2) >> lowres;
    sy = (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    /* Cr is at the same offset, so it needs emulation iff Cb did. */
    if (emu) {
        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
                                 s->uvlinesize, s->uvlinesize,
                                 9, 9,
                                 src_x, src_y, h_edge_pos, v_edge_pos);
        ptr = s->sc.edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
/**
 * Motion compensation of a single macroblock at lowres resolution.
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * The motion vectors are taken from s->mv and the MV type from s->mv_type.
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_y, uint8_t *dest_cb,
                                     uint8_t *dest_cr,
                                     int dir, uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres  = s->avctx->lowres;
    const int block_s = 8 >>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        /* one vector for the whole macroblock */
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                           0, 0, 0,
                           ref_picture, pix_op,
                           s->mv[dir][0][0], s->mv[dir][0][1],
                           2 * block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma gets their rounded sum below */
        mx = 0;
        my = 0;
        for (i = 0; i < 4; i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
                               s->linesize) * block_s,
                               ref_picture[0], 0, 0,
                               (2 * mb_x + (i & 1)) * block_s,
                               (2 * mb_y + (i >> 1)) * block_s,
                               s->width, s->height, s->linesize,
                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                               block_s, block_s, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
                                     pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0], s->mv[dir][0][1],
                               block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               1, 1, s->field_select[dir][1],
                               ref_picture, pix_op,
                               s->mv[dir][1][0], s->mv[dir][1][1],
                               block_s, mb_y);
        } else {
            /* A non-B field may reference the opposite-parity field of
             * the frame currently being decoded. */
            if (s->picture_structure != s->field_select[dir][0] + 1 &&
                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
                ref_picture = s->current_picture_ptr->f->data;
            }
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][0],
                               ref_picture, pix_op,
                               s->mv[dir][0][0],
                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
        }
        break;
    case MV_TYPE_16X8:
        for (i = 0; i < 2; i++) {
            uint8_t **ref2picture;
            if (s->picture_structure == s->field_select[dir][i] + 1 ||
                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
                ref2picture = ref_picture;
            } else {
                ref2picture = s->current_picture_ptr->f->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                               0, 0, s->field_select[dir][i],
                               ref2picture, pix_op,
                               s->mv[dir][i][0], s->mv[dir][i][1] +
                               2 * block_s * i, block_s, mb_y >> 1);

            dest_y  += 2 * block_s * s->linesize;
            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if (s->picture_structure == PICT_FRAME) {
            for (i = 0; i < 2; i++) {
                int j;
                for (j = 0; j < 2; j++) {
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                       1, j, j ^ i,
                                       ref_picture, pix_op,
                                       s->mv[dir][2 * i + j][0],
                                       s->mv[dir][2 * i + j][1],
                                       block_s, mb_y);
                }
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
            }
        } else {
            for (i = 0; i < 2; i++) {
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                   0, 0, s->picture_structure != i + 1,
                                   ref_picture, pix_op,
                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
                                   2 * block_s, mb_y >> 1);

                // after put we make avg of the same block
                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;

                // opposite parity is always in the same
                // frame if this is second field
                if (!s->first_field) {
                    ref_picture = s->current_picture_ptr->f->data;
                }
            }
        }
        break;
    default:
        av_assert2(0);
    }
}
/**
 * Find the lowest macroblock row that the motion vectors of the current
 * macroblock can reference in direction @p dir.  Falls back to the last
 * row when the prediction type is not one of the handled cases.
 */
static int lowest_referenced_row(MpegEncContext *s, int dir)
{
    const int qpel_shift = !s->quarter_sample;
    int my_min = INT_MAX, my_max = INT_MIN;
    int mvs, i, off;

    if (s->picture_structure != PICT_FRAME || s->mcsel)
        return s->mb_height - 1;

    switch (s->mv_type) {
    case MV_TYPE_16X16:
        mvs = 1;
        break;
    case MV_TYPE_16X8:
        mvs = 2;
        break;
    case MV_TYPE_8X8:
        mvs = 4;
        break;
    default:
        return s->mb_height - 1;
    }

    for (i = 0; i < mvs; i++) {
        const int my = s->mv[dir][i][1];
        my_max = FFMAX(my_max, my);
        my_min = FFMIN(my_min, my);
    }

    /* Convert the largest vertical displacement to a macroblock-row
     * offset, rounding up. */
    off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;

    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
}
/* Dequantize an intra block and write (put) its IDCT result to dest[]. */
static inline void put_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->idsp.idct_put(dest, line_size, block);
}
/* Add the IDCT of block[] onto dest[], skipping all-zero blocks. */
static inline void add_dct(MpegEncContext *s,
                           int16_t *block, int i, uint8_t *dest, int line_size)
{
    if (s->block_last_index[i] < 0)
        return;  /* no coded coefficients: nothing to add */

    s->idsp.idct_add(dest, line_size, block);
}
/* Dequantize an inter block and add its IDCT onto dest[], skipping
 * all-zero blocks. */
static inline void add_dequant_dct(MpegEncContext *s,
                                   int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
{
    if (s->block_last_index[i] < 0)
        return;  /* no coded coefficients: nothing to add */

    s->dct_unquantize_inter(s, block, i, qscale);
    s->idsp.idct_add(dest, line_size, block);
}
/**
 * Reset the DC, AC and coded-block predictors for the current
 * non-intra macroblock.
 */
void ff_clean_intra_table_entries(MpegEncContext *s)
{
    int stride = s->b8_stride;
    int idx    = s->block_index[0];

    /* Luma DC predictors: the four 8x8 blocks of this macroblock. */
    s->dc_val[0][idx]              = 1024;
    s->dc_val[0][idx + 1]          = 1024;
    s->dc_val[0][idx + stride]     = 1024;
    s->dc_val[0][idx + 1 + stride] = 1024;

    /* Luma AC prediction: clear both block rows (2 blocks x 16 coeffs each). */
    memset(s->ac_val[0][idx],          0, 32 * sizeof(int16_t));
    memset(s->ac_val[0][idx + stride], 0, 32 * sizeof(int16_t));

    if (s->msmpeg4_version >= 3) {
        s->coded_block[idx]              = 0;
        s->coded_block[idx + 1]          = 0;
        s->coded_block[idx + stride]     = 0;
        s->coded_block[idx + 1 + stride] = 0;
    }

    /* Chroma predictors are tracked at macroblock granularity. */
    stride = s->mb_stride;
    idx    = s->mb_x + s->mb_y * stride;

    s->dc_val[1][idx] = 1024;
    s->dc_val[2][idx] = 1024;

    memset(s->ac_val[1][idx], 0, 16 * sizeof(int16_t));
    memset(s->ac_val[2][idx], 0, 16 * sizeof(int16_t));

    s->mbintra_table[idx] = 0;
}
/* Generic reconstruction function called after a macroblock has been
   parsed by the decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra       : true if intra macroblock
   s->mv_dir         : motion vector direction
   s->mv_type        : motion vector type
   s->mv             : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
static av_always_inline
void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
                                 int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;

    /* XvMC hwaccel consumes the parsed blocks directly. */
    if (CONFIG_XVMC &&
        s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
        s->avctx->hwaccel->decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* print DCT coefficients */
       int i,j;
       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
       for(i=0; i<6; i++){
           for(j=0; j<64; j++){
               av_log(s->avctx, AV_LOG_DEBUG, "%5d",
                      block[i][s->idsp.idct_permutation[j]]);
           }
           av_log(s->avctx, AV_LOG_DEBUG, "\n");
       }
    }

    s->current_picture.qscale_table[mb_xy] = s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* Skip reconstruction entirely for some encoder configurations where
     * the reconstructed picture is never needed. */
    if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
        !(s->encoding && (s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
          s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize = s->current_picture.f->linesize[1];
        /* readable: we may write directly into the frame; otherwise
         * reconstruct into a scratchpad and copy back at the end. */
        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
                *mbskip_ptr = 1;
            } else if(!s->current_picture.reference) {
                *mbskip_ptr = 1;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        dct_linesize = linesize << s->interlaced_dct;
        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;

        if(readable){
            dest_y  = s->dest[0];
            dest_cb = s->dest[1];
            dest_cr = s->dest[2];
        }else{
            dest_y  = s->sc.b_scratchpad;
            dest_cb = s->sc.b_scratchpad+16*linesize;
            dest_cr = s->sc.b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){
                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
                    /* Wait until the referenced rows of the reference
                     * pictures have been decoded by the other threads. */
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_thread_await_progress(&s->last_picture_ptr->tf,
                                                 lowest_referenced_row(s, 0),
                                                 0);
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_thread_await_progress(&s->next_picture_ptr->tf,
                                                 lowest_referenced_row(s, 1),
                                                 0);
                    }
                }

                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
                        op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
                    }
                }else{
                    op_qpix = s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
                        op_pix = s->hdsp.put_pixels_tab;
                    }else{
                        op_pix = s->hdsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
                        op_pix = s->hdsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            if(s->encoding || !(   s->msmpeg4_version || s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != AV_CODEC_ID_WMV2)){
                /* Codecs whose blocks are already dequantized: add directly. */
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
               TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
            if (s->avctx->bits_per_raw_sample > 8){
                const int act_block_size = block_size * 2;
                s->idsp.idct_put(dest_y,                           dct_linesize, (int16_t*)(*s->block32)[0]);
                s->idsp.idct_put(dest_y + act_block_size,          dct_linesize, (int16_t*)(*s->block32)[1]);
                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, (int16_t*)(*s->block32)[2]);
                s->idsp.idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)(*s->block32)[3]);

                dct_linesize = uvlinesize << s->interlaced_dct;
                dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                s->idsp.idct_put(dest_cb,              dct_linesize, (int16_t*)(*s->block32)[4]);
                s->idsp.idct_put(dest_cr,              dct_linesize, (int16_t*)(*s->block32)[5]);
                s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, (int16_t*)(*s->block32)[6]);
                s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, (int16_t*)(*s->block32)[7]);
                if(!s->chroma_x_shift){//Chroma444
                    s->idsp.idct_put(dest_cb + act_block_size,              dct_linesize, (int16_t*)(*s->block32)[8]);
                    s->idsp.idct_put(dest_cr + act_block_size,              dct_linesize, (int16_t*)(*s->block32)[9]);
                    s->idsp.idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[10]);
                    s->idsp.idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->block32)[11]);
                }
            }
            /* dct only in intra block */
            else if(s->encoding || !(s->codec_id==AV_CODEC_ID_MPEG1VIDEO || s->codec_id==AV_CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* Already-dequantized intra blocks: plain inverse transform. */
                s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
                s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                    if(s->chroma_y_shift){
                        s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;

                        s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->idsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
                            s->idsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
                            s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
                            s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        /* Copy the scratchpad reconstruction back into the frame. */
        if(!readable){
            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
                s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
                s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
            }
        }
    }
}
/* Dispatch macroblock reconstruction to the matching compile-time
 * specialization of mpv_reconstruct_mb_internal(). */
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
{
    const int lowres = s->avctx->lowres;

#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1) {
        /* Dedicated MPEG-1/2 code path. */
        if (lowres)
            mpv_reconstruct_mb_internal(s, block, 1, 1);
        else
            mpv_reconstruct_mb_internal(s, block, 0, 1);
        return;
    }
#endif

    if (lowres)
        mpv_reconstruct_mb_internal(s, block, 1, 0);
    else
        mpv_reconstruct_mb_internal(s, block, 0, 0);
}
/* Notify the application that rows [y, y+h) of the current frame are ready. */
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
{
    AVFrame *last = s->last_picture_ptr ? s->last_picture_ptr->f : NULL;

    ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f, last,
                       y, h, s->picture_structure,
                       s->first_field, s->low_delay);
}
/* Set up s->block_index[] and s->dest[] for the current macroblock row.
 * Must be called once per MB row before per-MB processing. */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
    const int uvlinesize = s->current_picture.f->linesize[1];
    /* log2 of the MB width/height in pixels; >8-bit samples double the
     * byte width, lowres halves it per level */
    const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
    const int height_of_mb = 4 - s->avctx->lowres;

    /* Indices of the four luma 8x8 blocks (b8 grid), positioned one MB
     * to the left of the row start (hence the -2/-1). */
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) - 2 + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) - 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
    /* Chroma block indices live after the luma area in the same table. */
    s->block_index[4]= s->mb_stride*(s->mb_y + 1)                + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;

    //block_index is not used by mpeg2, so it is not affected by chroma_format

    /* Start one MB to the left (mb_x - 1); unsigned arithmetic then cast
     * back to int keeps the -1 offset well defined. */
    s->dest[0] = s->current_picture.f->data[0] + (int)((s->mb_x - 1U) <<  width_of_mb);
    s->dest[1] = s->current_picture.f->data[1] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));
    s->dest[2] = s->current_picture.f->data[2] + (int)((s->mb_x - 1U) << (width_of_mb - s->chroma_x_shift));

    if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
    {
        if(s->picture_structure==PICT_FRAME){
            s->dest[0] += s->mb_y *   linesize <<  height_of_mb;
            s->dest[1] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (height_of_mb - s->chroma_y_shift);
        }else{
            /* Field picture: each field has half the MB rows. */
            s->dest[0] += (s->mb_y>>1) *   linesize <<  height_of_mb;
            s->dest[1] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
            s->dest[2] += (s->mb_y>>1) * uvlinesize << (height_of_mb - s->chroma_y_shift);
            av_assert1((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
/* Discard all buffered pictures and reset parser/decoder position state. */
void ff_mpeg_flush(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int idx;

    if (!s || !s->picture)
        return;

    /* Drop every slot of the internal picture pool. */
    for (idx = 0; idx < MAX_PICTURE_COUNT; idx++)
        ff_mpeg_unref_picture(s->avctx, &s->picture[idx]);

    s->current_picture_ptr = NULL;
    s->last_picture_ptr    = NULL;
    s->next_picture_ptr    = NULL;

    ff_mpeg_unref_picture(s->avctx, &s->current_picture);
    ff_mpeg_unref_picture(s->avctx, &s->last_picture);
    ff_mpeg_unref_picture(s->avctx, &s->next_picture);

    /* Reset macroblock position and GOP state. */
    s->mb_x = 0;
    s->mb_y = 0;
    s->closed_gop = 0;

    /* Reset the bitstream parser. */
    s->parse_context.state             = -1;
    s->parse_context.frame_start_found = 0;
    s->parse_context.overread          = 0;
    s->parse_context.overread_index    = 0;
    s->parse_context.index             = 0;
    s->parse_context.last_index        = 0;

    s->bitstream_buffer_size = 0;
    s->pp_time = 0;
}
/**
 * Set qscale and update the qscale-dependent variables
 * (chroma qscale and the DC scale tables).
 */
void ff_set_qscale(MpegEncContext * s, int qscale)
{
    /* Clamp to the legal qscale range [1, 31]. */
    if (qscale < 1)
        qscale = 1;
    if (qscale > 31)
        qscale = 31;

    s->qscale        = qscale;
    s->chroma_qscale = s->chroma_qscale_table[qscale];
    s->y_dc_scale    = s->y_dc_scale_table[qscale];
    s->c_dc_scale    = s->c_dc_scale_table[s->chroma_qscale];
}
/* Report row-level decoding progress of the current frame for frame
 * threading; skipped for B-frames, partitioned frames and after errors. */
void ff_mpv_report_decode_progress(MpegEncContext *s)
{
    if (s->pict_type == AV_PICTURE_TYPE_B || s->partitioned_frame ||
        s->er.error_occurred)
        return;

    ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_192_2 |
crossvul-cpp_data_bad_3194_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS DDDD %
% P P SS D D %
% PPPP SSS D D %
% P SS D D %
% P SSSSS DDDD %
% %
% %
% Read/Write Adobe Photoshop Image Format %
% %
% Software Design %
% Cristy %
% Leonard Rosenthol %
% July 1992 %
% Dirk Lemstra %
% December 2013 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/channel.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/registry.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#ifdef MAGICKCORE_ZLIB_DELEGATE
#include <zlib.h>
#endif
#include "psd-private.h"
/*
  Define declarations.
*/
#define MaxPSDChannels  56
#define PSDQuantum(x) (((ssize_t) (x)+1) & -2)  /* round up to an even byte count */

/*
  Enumerated declarations.
*/
/* Per-channel compression methods as stored in a PSD file. */
typedef enum
{
  Raw = 0,
  RLE = 1,
  ZipWithoutPrediction = 2,
  ZipWithPrediction = 3
} PSDCompressionType;

/* Color modes from the PSD file header. */
typedef enum
{
  BitmapMode = 0,
  GrayscaleMode = 1,
  IndexedMode = 2,
  RGBMode = 3,
  CMYKMode = 4,
  MultichannelMode = 7,
  DuotoneMode = 8,
  LabMode = 9
} PSDImageType;

/*
  Typedef declarations.
*/
typedef struct _ChannelInfo
{
  short int
    type;  /* channel identifier from the layer record -- TODO confirm semantics against ReadPSDImage */

  size_t
    size;  /* channel data length in bytes */
} ChannelInfo;

typedef struct _MaskInfo
{
  Image
    *image;

  RectangleInfo
    page;

  unsigned char
    background,
    flags;
} MaskInfo;

typedef struct _LayerInfo
{
  ChannelInfo
    channel_info[MaxPSDChannels];

  char
    blendkey[4];  /* 4-character blend mode key */

  Image
    *image;

  MaskInfo
    mask;

  Quantum
    opacity;

  RectangleInfo
    page;

  size_t
    offset_x,
    offset_y;

  unsigned char
    clipping,
    flags,
    name[256],  /* layer name (Pascal-style string in the file -- TODO confirm) */
    visible;

  unsigned short
    channels;

  StringInfo
    *info;
} LayerInfo;

/*
  Forward declarations.
*/
static MagickBooleanType
  WritePSDImage(const ImageInfo *,Image *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S D %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPSD() returns MagickTrue if the image format type, identified by the
% magick string, is PSD.
%
% The format of the IsPSD method is:
%
% MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length)
{
  /*
    A PSD stream begins with the 4-byte signature "8BPS"; anything shorter
    than the signature cannot match.
  */
  if (length >= 4)
    {
      if (LocaleNCompare((const char *) magick,"8BPS",4) == 0)
        return(MagickTrue);
    }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSDImage() reads an Adobe Photoshop image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadPSDImage method is:
%
% Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const char *CompositeOperatorToPSDBlendMode(CompositeOperator op)
{
  /*
    Translate an ImageMagick composite operator into the 4-character
    Photoshop blend-mode key; any unmapped operator falls back to "norm"
    (normal blending).
  */
  switch (op)
  {
    case ColorBurnCompositeOp: return("idiv");
    case ColorDodgeCompositeOp: return("div ");
    case ColorizeCompositeOp: return("colr");
    case DarkenCompositeOp: return("dark");
    case DifferenceCompositeOp: return("diff");
    case DissolveCompositeOp: return("diss");
    case ExclusionCompositeOp: return("smud");
    case HardLightCompositeOp: return("hLit");
    case HardMixCompositeOp: return("hMix");
    case HueCompositeOp: return("hue ");
    case LightenCompositeOp: return("lite");
    case LinearBurnCompositeOp: return("lbrn");
    case LinearDodgeCompositeOp: return("lddg");
    case LinearLightCompositeOp: return("lLit");
    case LuminizeCompositeOp: return("lum ");
    case MultiplyCompositeOp: return("mul ");
    case OverCompositeOp: return("norm");
    case OverlayCompositeOp: return("over");
    case PinLightCompositeOp: return("pLit");
    case SaturateCompositeOp: return("sat ");
    case ScreenCompositeOp: return("scrn");
    case SoftLightCompositeOp: return("sLit");
    case VividLightCompositeOp: return("vLit");
    default: return("norm");
  }
}
/*
For some reason Photoshop seems to blend semi-transparent pixels with white.
This method reverts the blending. This can be disabled by setting the
option 'psd:alpha-unblend' to off.
*/
/*
  Undo Photoshop's pre-blending of semi-transparent pixels with white:
  for each pixel with 0 < alpha < 1, invert "composite over white".
  Controlled by the 'psd:alpha-unblend' option (enabled unless set to a
  false value).  Returns MagickTrue on success.
*/
static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info,
  Image *image, ExceptionInfo* exception)
{
  const char
    *option;
  MagickBooleanType
    status;
  ssize_t
    y;
  /* Only meaningful for sRGB images that actually carry alpha. */
  if (image->matte == MagickFalse || image->colorspace != sRGBColorspace)
    return(MagickTrue);
  option=GetImageOption(image_info,"psd:alpha-unblend");
  if (IsStringNotFalse(option) == MagickFalse)
    return(MagickTrue);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        gamma;
      /* gamma is the normalized alpha; skip fully opaque/transparent. */
      gamma=QuantumScale*GetPixelAlpha(q);
      if (gamma != 0.0 && gamma != 1.0)
        {
          /* Invert "over white": c' = (c - (1-a)*white) / a. */
          SetPixelRed(q,(GetPixelRed(q)-((1.0-gamma)*QuantumRange))/gamma);
          SetPixelGreen(q,(GetPixelGreen(q)-((1.0-gamma)*QuantumRange))/gamma);
          SetPixelBlue(q,(GetPixelBlue(q)-((1.0-gamma)*QuantumRange))/gamma);
        }
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
static inline CompressionType ConvertPSDCompression(
  PSDCompressionType compression)
{
  /*
    Map the PSD on-disk compression id onto ImageMagick's generic
    compression type; unknown ids are treated as uncompressed.
  */
  if (compression == RLE)
    return RLECompression;
  if ((compression == ZipWithPrediction) ||
      (compression == ZipWithoutPrediction))
    return ZipCompression;
  return NoCompression;
}
/*
  Multiply every pixel's alpha by the layer opacity (revert == MagickFalse),
  or divide it back out (revert != MagickFalse, used when re-writing).
  A fully opaque layer is a no-op.  Forces the matte flag on since alpha
  becomes significant.  Returns MagickTrue on success.
*/
static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity,
  MagickBooleanType revert,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  ssize_t
    y;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying layer opacity %.20g", (double) opacity);
  if (opacity == QuantumRange)
    return(MagickTrue);
  image->matte=MagickTrue;
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (revert == MagickFalse)
        SetPixelAlpha(q,(Quantum) (QuantumScale*(GetPixelAlpha(q)*opacity)));
      else if (opacity > 0)
        /* Inverse of the scaling above; guarded against divide-by-zero. */
        SetPixelAlpha(q,(Quantum) (QuantumRange*(GetPixelAlpha(q)/
          (MagickRealType) opacity)));
      q++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  return(status);
}
/*
  Modulate the image's alpha channel by a layer opacity mask: a canvas-
  sized copy of the mask (filled with the mask background where the mask
  does not cover) is composited into place, and each pixel's alpha is
  scaled by the mask intensity (or the scaling reverted when revert is
  MagickTrue).  Returns MagickTrue on success.

  Fix: the CloneImage() result was previously dereferenced without a NULL
  check, crashing on allocation failure.
*/
static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask,
  Quantum background,MagickBooleanType revert,ExceptionInfo *exception)
{
  Image
    *complete_mask;
  MagickBooleanType
    status;
  MagickPixelPacket
    color;
  ssize_t
    y;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " applying opacity mask");
  complete_mask=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (complete_mask == (Image *) NULL)
    return(MagickFalse);
  complete_mask->matte=MagickTrue;
  /* Fill the canvas-sized mask with the mask background gray level. */
  GetMagickPixelPacket(complete_mask,&color);
  color.red=background;
  SetImageColor(complete_mask,&color);
  status=CompositeImage(complete_mask,OverCompositeOp,mask,
    mask->page.x-image->page.x,mask->page.y-image->page.y);
  if (status == MagickFalse)
    {
      complete_mask=DestroyImage(complete_mask);
      return(status);
    }
  image->matte=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;
    register PixelPacket
      *p;
    register ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception);
    if ((q == (PixelPacket *) NULL) || (p == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        alpha,
        intensity;
      alpha=GetPixelAlpha(q);
      intensity=GetPixelIntensity(complete_mask,p);
      if (revert == MagickFalse)
        SetPixelAlpha(q,ClampToQuantum(intensity*(QuantumScale*alpha)));
      else if (intensity > 0)
        /* Undo the modulation; guarded against divide-by-zero. */
        SetPixelAlpha(q,ClampToQuantum((alpha/intensity)*QuantumRange));
      q++;
      p++;
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      status=MagickFalse;
  }
  complete_mask=DestroyImage(complete_mask);
  return(status);
}
/*
  Stash the layer's opacity-mask image in the image registry under a
  random key and record that key in the "psd:opacity-mask" artifact so a
  later write can recover the mask.

  Fix: the key buffer was previously requested as GetRandomKey(...,2+1)
  (3 random bytes) while key[8] and key[9] were written, so key[3..7]
  stayed uninitialized and the key could contain garbage or a premature
  NUL.  Request the full 10 bytes instead; indices 8 and 9 are then
  overwritten with the mask background and the terminator.
  NOTE(review): the remaining random bytes may still include embedded NUL
  characters, truncating the key string -- consider hex-encoding; confirm
  against how the writer consumes this artifact.
*/
static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info,
  ExceptionInfo *exception)
{
  char
    *key;
  RandomInfo
    *random_info;
  StringInfo
    *key_info;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " preserving opacity mask");
  random_info=AcquireRandomInfo();
  key_info=GetRandomKey(random_info,9+1);
  key=(char *) GetStringInfoDatum(key_info);
  key[8]=layer_info->mask.background;
  key[9]='\0';
  /* Translate the mask page to canvas coordinates before registering. */
  layer_info->mask.image->page.x+=layer_info->page.x;
  layer_info->mask.image->page.y+=layer_info->page.y;
  (void) SetImageRegistry(ImageRegistryType,(const char *) key,
    layer_info->mask.image,exception);
  (void) SetImageArtifact(layer_info->image,"psd:opacity-mask",
    (const char *) key);
  key_info=DestroyStringInfo(key_info);
  random_info=DestroyRandomInfo(random_info);
}
/*
  Decode a PackBits-style RLE stream into raw samples.
  - number_compact_pixels/compact_pixels: the compressed input.
  - depth: bits per sample; 1/2/4 expand each byte into multiple output
    samples, any other value copies bytes verbatim.  (The RLE caller
    passes the sentinel 123456 for 1-bit images so bytes are copied raw
    and bit expansion happens later in ReadPSDChannelPixels.)
  - number_pixels/pixels: capacity and destination of the decoded output.
  Returns the number of output bytes produced; stops early (without
  overrunning either buffer) when input or output is exhausted.
*/
static ssize_t DecodePSDPixels(const size_t number_compact_pixels,
  const unsigned char *compact_pixels,const ssize_t depth,
  const size_t number_pixels,unsigned char *pixels)
{
/* Bail out (returning bytes produced so far) when the input is exhausted;
   otherwise consume one input byte. */
#define CheckNumberCompactPixels \
  if (packets == 0) \
    return(i); \
  packets--
/* Bail out when `count` more output bytes would overflow the destination;
   otherwise account for them. */
#define CheckNumberPixels(count) \
  if (((ssize_t) i + count) > (ssize_t) number_pixels) \
    return(i); \
  i+=count
  int
    pixel;
  register ssize_t
    i,
    j;
  size_t
    length;
  ssize_t
    packets;
  packets=(ssize_t) number_compact_pixels;
  /* Each iteration consumes one control byte plus its payload. */
  for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); )
  {
    packets--;
    length=(size_t) (*compact_pixels++);
    /* 128 is a no-op filler byte in PackBits. */
    if (length == 128)
      continue;
    if (length > 128)
      {
        /* Run: the next byte is repeated (257 - control) times. */
        length=256-length+1;
        CheckNumberCompactPixels;
        pixel=(*compact_pixels++);
        for (j=0; j < (ssize_t) length; j++)
        {
          switch (depth)
          {
            case 1:
            {
              /* 1-bit: each byte expands to 8 inverted 0/255 samples. */
              CheckNumberPixels(8);
              *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U;
              *pixels++=(pixel >> 0) & 0x01 ? 0U : 255U;
              break;
            }
            case 2:
            {
              /* 2-bit: four samples per byte, most significant first. */
              CheckNumberPixels(4);
              *pixels++=(unsigned char) ((pixel >> 6) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 4) & 0x03);
              *pixels++=(unsigned char) ((pixel >> 2) & 0x03);
              *pixels++=(unsigned char) ((pixel & 0x03) & 0x03);
              break;
            }
            case 4:
            {
              /* 4-bit: two samples per byte, high nibble first. */
              CheckNumberPixels(2);
              *pixels++=(unsigned char) ((pixel >> 4) & 0xff);
              *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff);
              break;
            }
            default:
            {
              /* 8-bit (and the 1-bit sentinel): copy the byte verbatim. */
              CheckNumberPixels(1);
              *pixels++=(unsigned char) pixel;
              break;
            }
          }
        }
        continue;
      }
    /* Literal: (control + 1) bytes follow, each emitted individually. */
    length++;
    for (j=0; j < (ssize_t) length; j++)
    {
      CheckNumberCompactPixels;
      switch (depth)
      {
        case 1:
        {
          CheckNumberPixels(8);
          *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U;
          *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U;
          break;
        }
        case 2:
        {
          CheckNumberPixels(4);
          *pixels++=(*compact_pixels >> 6) & 0x03;
          *pixels++=(*compact_pixels >> 4) & 0x03;
          *pixels++=(*compact_pixels >> 2) & 0x03;
          *pixels++=(*compact_pixels & 0x03) & 0x03;
          break;
        }
        case 4:
        {
          CheckNumberPixels(2);
          *pixels++=(*compact_pixels >> 4) & 0xff;
          *pixels++=(*compact_pixels & 0x0f) & 0xff;
          break;
        }
        default:
        {
          CheckNumberPixels(1);
          *pixels++=(*compact_pixels);
          break;
        }
      }
      compact_pixels++;
    }
  }
  return(i);
}
static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info,
  const ssize_t number_layers)
{
  register ssize_t
    layer;

  /*
    Release each layer's raster, opacity-mask image and additional-info
    buffer, then free the layer table itself.  Always returns NULL.
  */
  for (layer=0; layer < number_layers; layer++)
  {
    if (layer_info[layer].image != (Image *) NULL)
      layer_info[layer].image=DestroyImage(layer_info[layer].image);
    if (layer_info[layer].mask.image != (Image *) NULL)
      layer_info[layer].mask.image=DestroyImage(layer_info[layer].mask.image);
    if (layer_info[layer].info != (StringInfo *) NULL)
      layer_info[layer].info=DestroyStringInfo(layer_info[layer].info);
  }
  return((LayerInfo *) RelinquishMagickMemory(layer_info));
}
static inline size_t GetPSDPacketSize(Image *image)
{
  /*
    Bytes per decoded sample: two when the image is deep (> 8 bits) or a
    colormapped image needs 16-bit indexes (> 256 colors), one otherwise.
  */
  if (image->depth > 8)
    return(2);
  if ((image->storage_class == PseudoClass) && (image->colors > 256))
    return(2);
  return(1);
}
static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image)
{
  /* Version 1 (PSD) stores 32-bit section lengths; version 2 (PSB) 64-bit. */
  if (psd_info->version != 1)
    return((MagickSizeType) ReadBlobLongLong(image));
  return((MagickSizeType) ReadBlobLong(image));
}
static inline size_t GetPSDRowSize(Image *image)
{
  size_t
    samples;

  /* 1-bit images pack eight pixels per byte; round the row up. */
  samples=image->columns;
  if (image->depth == 1)
    samples=(samples+7)/8;
  return(samples*GetPSDPacketSize(image));
}
static const char *ModeToString(PSDImageType type)
{
  /* Human-readable name of a PSD color mode, used in debug logging. */
  switch (type)
  {
    case BitmapMode: return("Bitmap");
    case GrayscaleMode: return("Grayscale");
    case IndexedMode: return("Indexed");
    case RGBMode: return("RGB");
    case CMYKMode: return("CMYK");
    case MultichannelMode: return("Multichannel");
    case DuotoneMode: return("Duotone");
    case LabMode: return("L*A*B");
    default: break;
  }
  return("unknown");
}
/*
  Attach the raw image-resource area to the image as its "8bim" profile,
  then walk the individual 8BIM resource blocks for the ids we care
  about: 0x03ed (resolution info) and 0x0421 (merged-image presence).
  *has_merged_image is cleared when resource 0x0421 says no merged data.

  Fix: BlobToStringInfo() can return NULL on allocation failure; the
  result was previously passed straight to SetStringInfoDatum(), causing
  a NULL-pointer dereference.
*/
static void ParseImageResourceBlocks(Image *image,
  const unsigned char *blocks,size_t length,
  MagickBooleanType *has_merged_image)
{
  const unsigned char
    *p;
  StringInfo
    *profile;
  unsigned int
    count,
    long_sans;
  unsigned short
    id,
    short_sans;
  if (length < 16)
    return;
  profile=BlobToStringInfo((const void *) NULL,length);
  if (profile == (StringInfo *) NULL)
    return;
  SetStringInfoDatum(profile,blocks);
  (void) SetImageProfile(image,"8bim",profile);
  profile=DestroyStringInfo(profile);
  /* The -16 slack guarantees each header read below stays in bounds. */
  for (p=blocks; (p >= blocks) && (p < (blocks+length-16)); )
  {
    if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
      break;
    p=PushLongPixel(MSBEndian,p,&long_sans);  /* skip "8BIM" signature */
    p=PushShortPixel(MSBEndian,p,&id);
    p=PushShortPixel(MSBEndian,p,&short_sans);  /* Pascal name (ignored) */
    p=PushLongPixel(MSBEndian,p,&count);
    if ((p+count) > (blocks+length-16))
      return;
    switch (id)
    {
      case 0x03ed:
      {
        char
          value[MaxTextExtent];
        unsigned short
          resolution;
        /*
          Resolution info.
          NOTE(review): this assumes the block payload is exactly 16
          bytes; a malformed count would desynchronize the walk -- TODO
          confirm against the resource-block specification.
        */
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->x_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->x_resolution);
        (void) SetImageProperty(image,"tiff:XResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&resolution);
        image->y_resolution=(double) resolution;
        (void) FormatLocaleString(value,MaxTextExtent,"%g",
          image->y_resolution);
        (void) SetImageProperty(image,"tiff:YResolution",value);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        p=PushShortPixel(MSBEndian,p,&short_sans);
        image->units=PixelsPerInchResolution;
        break;
      }
      case 0x0421:
      {
        /* Byte 4 of the payload flags whether merged data is valid. */
        if (*(p+4) == 0)
          *has_merged_image=MagickFalse;
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* Resource blocks are padded to an even total length. */
    if ((count & 0x01) != 0)
      p++;
  }
  return;
}
static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode)
{
if (mode == (const char *) NULL)
return(OverCompositeOp);
if (LocaleNCompare(mode,"norm",4) == 0)
return(OverCompositeOp);
if (LocaleNCompare(mode,"mul ",4) == 0)
return(MultiplyCompositeOp);
if (LocaleNCompare(mode,"diss",4) == 0)
return(DissolveCompositeOp);
if (LocaleNCompare(mode,"diff",4) == 0)
return(DifferenceCompositeOp);
if (LocaleNCompare(mode,"dark",4) == 0)
return(DarkenCompositeOp);
if (LocaleNCompare(mode,"lite",4) == 0)
return(LightenCompositeOp);
if (LocaleNCompare(mode,"hue ",4) == 0)
return(HueCompositeOp);
if (LocaleNCompare(mode,"sat ",4) == 0)
return(SaturateCompositeOp);
if (LocaleNCompare(mode,"colr",4) == 0)
return(ColorizeCompositeOp);
if (LocaleNCompare(mode,"lum ",4) == 0)
return(LuminizeCompositeOp);
if (LocaleNCompare(mode,"scrn",4) == 0)
return(ScreenCompositeOp);
if (LocaleNCompare(mode,"over",4) == 0)
return(OverlayCompositeOp);
if (LocaleNCompare(mode,"hLit",4) == 0)
return(HardLightCompositeOp);
if (LocaleNCompare(mode,"sLit",4) == 0)
return(SoftLightCompositeOp);
if (LocaleNCompare(mode,"smud",4) == 0)
return(ExclusionCompositeOp);
if (LocaleNCompare(mode,"div ",4) == 0)
return(ColorDodgeCompositeOp);
if (LocaleNCompare(mode,"idiv",4) == 0)
return(ColorBurnCompositeOp);
if (LocaleNCompare(mode,"lbrn",4) == 0)
return(LinearBurnCompositeOp);
if (LocaleNCompare(mode,"lddg",4) == 0)
return(LinearDodgeCompositeOp);
if (LocaleNCompare(mode,"lLit",4) == 0)
return(LinearLightCompositeOp);
if (LocaleNCompare(mode,"vLit",4) == 0)
return(VividLightCompositeOp);
if (LocaleNCompare(mode,"pLit",4) == 0)
return(PinLightCompositeOp);
if (LocaleNCompare(mode,"hMix",4) == 0)
return(HardMixCompositeOp);
return(OverCompositeOp);
}
static inline void ReversePSDString(Image *image,char *p,size_t length)
{
  register char
    *head,
    *tail;

  /*
    Reverse a fixed-length key in place; a no-op when the blob is
    big-endian (the bytes are already in on-disk order).
  */
  if (image->endian == MSBEndian)
    return;
  head=p;
  tail=p+length-1;
  while (head < tail)
  {
    char
      swap;

    swap=(*head);
    *head++=(*tail);
    *tail--=swap;
  }
}
/*
  Store one decoded sample into the pixel/index arrays according to the
  PSD channel id `type`:
    -1  alpha channel
    -2  user-supplied mask (stored as gray)
     0  red (or gray; replicated to G/B for single-channel images)
     1  green (alpha for colormapped images)
     2  blue (alpha for colormapped images)
     3  black for CMYK, otherwise alpha when the image has matte
     4  alpha for CMYK images with an alpha channel
  For PseudoClass images the sample is a colormap index and the RGB
  values are resolved immediately from the colormap.
*/
static inline void SetPSDPixel(Image *image,const size_t channels,
  const ssize_t type,const size_t packet_size,const Quantum pixel,
  PixelPacket *q,IndexPacket *indexes,ssize_t x)
{
  if (image->storage_class == PseudoClass)
    {
      /* Narrow the quantum back to the stored index width. */
      if (packet_size == 1)
        SetPixelIndex(indexes+x,ScaleQuantumToChar(pixel));
      else
        SetPixelIndex(indexes+x,ScaleQuantumToShort(pixel));
      SetPixelRGBO(q,image->colormap+(ssize_t)
        ConstrainColormapIndex(image,GetPixelIndex(indexes+x)));
      return;
    }
  switch (type)
  {
    case -1:
    {
      SetPixelAlpha(q,pixel);
      break;
    }
    case -2:
    case 0:
    {
      SetPixelRed(q,pixel);
      /* Gray images and user masks replicate the sample to all bands. */
      if (channels == 1 || type == -2)
        {
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
        }
      break;
    }
    case 1:
    {
      /* Unreachable for PseudoClass (handled above); kept for safety. */
      if (image->storage_class == PseudoClass)
        SetPixelAlpha(q,pixel);
      else
        SetPixelGreen(q,pixel);
      break;
    }
    case 2:
    {
      if (image->storage_class == PseudoClass)
        SetPixelAlpha(q,pixel);
      else
        SetPixelBlue(q,pixel);
      break;
    }
    case 3:
    {
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(indexes+x,pixel);
      else
        if (image->matte != MagickFalse)
          SetPixelAlpha(q,pixel);
      break;
    }
    case 4:
    {
      /* RGB images with extra channels do not map channel 4 to alpha. */
      if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) &&
          (channels > 3))
        break;
      if (image->matte != MagickFalse)
        SetPixelAlpha(q,pixel);
      break;
    }
  }
}
/*
  Transfer one decoded scanline (`pixels`) into row `row` of the image,
  dispatching each sample through SetPSDPixel() for the channel id
  `type`.  Samples are 1 or 2 bytes wide (GetPSDPacketSize); for 1-bit
  images each byte expands to eight inverted bilevel pixels.  Returns
  the result of SyncAuthenticPixels().
*/
static MagickBooleanType ReadPSDChannelPixels(Image *image,const size_t channels,
  const size_t row,const ssize_t type,const unsigned char *pixels,
  ExceptionInfo *exception)
{
  Quantum
    pixel;
  register const unsigned char
    *p;
  register IndexPacket
    *indexes;
  register PixelPacket
    *q;
  register ssize_t
    x;
  size_t
    packet_size;
  unsigned short
    nibble;
  p=pixels;
  q=GetAuthenticPixels(image,0,row,image->columns,1,exception);
  if (q == (PixelPacket *) NULL)
    return MagickFalse;
  indexes=GetAuthenticIndexQueue(image);
  packet_size=GetPSDPacketSize(image);
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (packet_size == 1)
      pixel=ScaleCharToQuantum(*p++);
    else
      {
        /* 16-bit samples are stored big-endian. */
        p=PushShortPixel(MSBEndian,p,&nibble);
        pixel=ScaleShortToQuantum(nibble);
      }
    if (image->depth > 1)
      {
        SetPSDPixel(image,channels,type,packet_size,pixel,q++,indexes,x);
      }
    else
      {
        ssize_t
          bit,
          number_bits;
        /* 1-bit: unpack up to 8 pixels from this byte; a set bit means
           black (0), a clear bit white (QuantumRange). */
        number_bits=image->columns-x;
        if (number_bits > 8)
          number_bits=8;
        for (bit=0; bit < number_bits; bit++)
        {
          SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel)
            & (0x01 << (7-bit))) != 0 ? 0 : QuantumRange,q++,indexes,x++);
        }
        /* The inner loop advanced x one past the last pixel written;
           step back so the outer loop's x++ lands correctly. */
        if (x != (ssize_t) image->columns)
          x--;
        continue;
      }
  }
  return(SyncAuthenticPixels(image,exception));
}
static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels,
  const ssize_t type,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  size_t
    row_size;

  ssize_t
    y;

  unsigned char
    *scanline;

  /*
    Read an uncompressed channel: one raw scanline per image row, each
    handed to ReadPSDChannelPixels() for sample dispatch.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RAW");
  row_size=GetPSDRowSize(image);
  scanline=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*scanline));
  if (scanline == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    if (ReadBlob(image,row_size,scanline) != (ssize_t) row_size)
      {
        status=MagickFalse;
        break;
      }
    status=ReadPSDChannelPixels(image,channels,y,type,scanline,exception);
    if (status == MagickFalse)
      break;
  }
  scanline=(unsigned char *) RelinquishMagickMemory(scanline);
  return(status);
}
static inline MagickOffsetType *ReadPSDRLESizes(Image *image,
  const PSDInfo *psd_info,const size_t size)
{
  MagickOffsetType
    *sizes;

  register ssize_t
    i;

  /*
    Read the per-scanline compressed-length table that precedes RLE data:
    16-bit counts for version 1 (PSD), 32-bit for version 2 (PSB).
    Returns NULL on allocation failure.
  */
  sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes));
  if (sizes == (MagickOffsetType *) NULL)
    return((MagickOffsetType *) NULL);
  for (i=0; i < (ssize_t) size; i++)
  {
    if (psd_info->version == 1)
      sizes[i]=(MagickOffsetType) ReadBlobShort(image);
    else
      sizes[i]=(MagickOffsetType) ReadBlobLong(image);
  }
  return(sizes);
}
/*
  Read an RLE (PackBits) compressed channel.  `sizes` holds the
  compressed byte length of each scanline (from ReadPSDRLESizes); a
  single compact buffer sized for the longest row is reused for every
  scanline, decompressed via DecodePSDPixels() and dispatched through
  ReadPSDChannelPixels().  Returns MagickFalse on short reads or decode
  failure.
*/
static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info,
  const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  size_t
    length,
    row_size;
  ssize_t
    count,
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is RLE compressed");
  row_size=GetPSDRowSize(image);
  pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /* Size the compact buffer for the largest compressed scanline.
     NOTE(review): sizes[] comes from the file unvalidated; a huge entry
     drives a huge allocation -- confirm upstream bounds checking. */
  length=0;
  for (y=0; y < (ssize_t) image->rows; y++)
    if ((MagickOffsetType) length < sizes[y])
      length=(size_t) sizes[y];
  /* NOTE(review): sizeof(*pixels) here was presumably meant to be
     sizeof(*compact_pixels); both are unsigned char so it is harmless. */
  compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*pixels));
  if (compact_pixels == (unsigned char *) NULL)
    {
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  (void) ResetMagickMemory(compact_pixels,0,length*sizeof(*compact_pixels));
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=MagickFalse;
    count=ReadBlob(image,(size_t) sizes[y],compact_pixels);
    if (count != (ssize_t) sizes[y])
      break;
    /* depth 123456 is a sentinel: 1-bit rows are copied raw and the bit
       expansion happens later in ReadPSDChannelPixels(). */
    count=DecodePSDPixels((size_t) sizes[y],compact_pixels,
      (ssize_t) (image->depth == 1 ? 123456 : image->depth),row_size,pixels);
    if (count != (ssize_t) row_size)
      break;
    status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels,
      exception);
    if (status == MagickFalse)
      break;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#ifdef MAGICKCORE_ZLIB_DELEGATE
/*
  Read a ZIP-compressed channel: inflate `compact_size` input bytes into
  a full channel's worth of samples, optionally undo the per-row delta
  prediction (ZipWithPrediction), then dispatch each row through
  ReadPSDChannelPixels().  Returns MagickFalse on read, inflate or
  dispatch failure.

  Fixes over the previous version:
  - break out of the inflate loop on Z_STREAM_END; previously a stream
    that ended before filling the output looped forever (inflate keeps
    returning Z_STREAM_END without progress).
  - call inflateEnd() on every path so the zlib state is not leaked.
  - check the ReadBlob() count and the inflateInit() result instead of
    silently continuing with incomplete/uninitialized data.
*/
static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels,
  const ssize_t type,const PSDCompressionType compression,
  const size_t compact_size,ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  register unsigned char
    *p;
  size_t
    count,
    length,
    packet_size,
    row_size;
  ssize_t
    y;
  unsigned char
    *compact_pixels,
    *pixels;
  z_stream
    stream;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " layer data is ZIP compressed");
  compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size,
    sizeof(*compact_pixels));
  if (compact_pixels == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  packet_size=GetPSDPacketSize(image);
  row_size=image->columns*packet_size;
  count=image->rows*row_size;
  pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      return(MagickFalse);
    }
  ResetMagickMemory(&stream,0,sizeof(stream));
  stream.data_type=Z_BINARY;
  stream.next_in=(Bytef *)compact_pixels;
  stream.avail_in=(uInt) compact_size;
  stream.next_out=(Bytef *)pixels;
  stream.avail_out=(uInt) count;
  if (inflateInit(&stream) != Z_OK)
    {
      compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
      pixels=(unsigned char *) RelinquishMagickMemory(pixels);
      return(MagickFalse);
    }
  while (stream.avail_out > 0)
  {
    int
      ret;

    ret=inflate(&stream,Z_SYNC_FLUSH);
    if (ret == Z_STREAM_END)
      break;
    if (ret != Z_OK)
      {
        (void) inflateEnd(&stream);
        compact_pixels=(unsigned char *) RelinquishMagickMemory(
          compact_pixels);
        pixels=(unsigned char *) RelinquishMagickMemory(pixels);
        return(MagickFalse);
      }
  }
  (void) inflateEnd(&stream);
  if (compression == ZipWithPrediction)
    {
      /* Undo the horizontal delta prediction, row by row. */
      p=pixels;
      while (count > 0)
      {
        length=image->columns;
        while (--length)
        {
          if (packet_size == 2)
            {
              /* 16-bit big-endian samples: propagate the carry. */
              p[2]+=p[0]+((p[1]+p[3]) >> 8);
              p[3]+=p[1];
            }
          else
            *(p+1)+=*p;
          p+=packet_size;
        }
        p+=packet_size;
        count-=row_size;
      }
    }
  status=MagickTrue;
  p=pixels;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    status=ReadPSDChannelPixels(image,channels,y,type,p,exception);
    if (status == MagickFalse)
      break;
    p+=row_size;
  }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  pixels=(unsigned char *) RelinquishMagickMemory(pixels);
  return(status);
}
#endif
/*
  Decode one channel of a layer.  Regular channels (type >= -1) decode
  into the layer image; a user-supplied opacity mask (type -2) decodes
  into a freshly cloned mask image stored in layer_info->mask.image.
  Other mask types, disabled masks and unsupported mask flags are
  skipped.  The blob is always repositioned to the end of the channel's
  data on return.

  Fixes over the previous version:
  - CloneImage() can return NULL on failure; the result was previously
    dereferenced (mask->matte) without a check.
  - the mask image was leaked when the RLE size-table allocation failed.
*/
static MagickBooleanType ReadPSDChannel(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info,
  const size_t channel,const PSDCompressionType compression,
  ExceptionInfo *exception)
{
  Image
    *channel_image,
    *mask;
  MagickOffsetType
    offset;
  MagickBooleanType
    status;
  channel_image=image;
  mask=(Image *) NULL;
  if (layer_info->channel_info[channel].type < -1)
    {
      const char
        *option;
      /*
        Ignore mask that is not a user supplied layer mask, if the mask is
        disabled or if the flags have unsupported values.
      */
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if ((layer_info->channel_info[channel].type != -2) ||
          (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) &&
           (IsStringTrue(option) == MagickFalse)))
        {
          /* The channel size includes the 2-byte compression marker. */
          SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR);
          return(MagickTrue);
        }
      mask=CloneImage(image,layer_info->mask.page.width,
        layer_info->mask.page.height,MagickFalse,exception);
      if (mask == (Image *) NULL)
        return(MagickFalse);
      mask->matte=MagickFalse;
      channel_image=mask;
    }
  offset=TellBlob(image);
  status=MagickTrue;
  switch(compression)
  {
    case Raw:
      status=ReadPSDChannelRaw(channel_image,psd_info->channels,
        layer_info->channel_info[channel].type,exception);
      break;
    case RLE:
    {
      MagickOffsetType
        *sizes;
      sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows);
      if (sizes == (MagickOffsetType *) NULL)
        {
          if (mask != (Image *) NULL)
            mask=DestroyImage(mask);
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      status=ReadPSDChannelRLE(channel_image,psd_info,
        layer_info->channel_info[channel].type,sizes,exception);
      sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes);
    }
    break;
    case ZipWithPrediction:
    case ZipWithoutPrediction:
#ifdef MAGICKCORE_ZLIB_DELEGATE
      status=ReadPSDChannelZip(channel_image,layer_info->channels,
        layer_info->channel_info[channel].type,compression,
        layer_info->channel_info[channel].size-2,exception);
#else
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (ZLIB)",image->filename);
#endif
      break;
    default:
      (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning,
        "CompressionNotSupported","'%.20g'",(double) compression);
      break;
  }
  /* Reposition past the channel data regardless of decode outcome. */
  SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET);
  if (status == MagickFalse)
    {
      if (mask != (Image *) NULL)
        mask=DestroyImage(mask);
      ThrowBinaryException(CoderError,"UnableToDecompressImage",
        image->filename);
    }
  layer_info->mask.image=mask;
  return(status);
}
/*
  Decode one complete layer: set up the layer image (background, blend
  mode, colorspace, hidden psd:layer.* artifacts), read every channel
  via ReadPSDChannel(), then apply the layer opacity and, when present,
  the opacity mask.  Returns MagickTrue on success.
*/
static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info,
  const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception)
{
  char
    message[MaxTextExtent];
  MagickBooleanType
    status;
  PSDCompressionType
    compression;
  ssize_t
    j;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " setting up new layer image");
  if (psd_info->mode != IndexedMode)
    (void) SetImageBackgroundColor(layer_info->image);
  layer_info->image->compose=PSDBlendModeToCompositeOperator(
    layer_info->blendkey);
  if (layer_info->visible == MagickFalse)
    {
      /* Hidden layers do not composite but stay addressable via artifact. */
      layer_info->image->compose=NoCompositeOp;
      (void) SetImageArtifact(layer_info->image,"psd:layer.invisible","true");
    }
  if (psd_info->mode == CMYKMode)
    SetImageColorspace(layer_info->image,CMYKColorspace);
  else if ((psd_info->mode == BitmapMode) || (psd_info->mode == DuotoneMode) ||
           (psd_info->mode == GrayscaleMode))
    SetImageColorspace(layer_info->image,GRAYColorspace);
  /*
    Set up some hidden attributes for folks that need them.
  */
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.x);
  (void) SetImageArtifact(layer_info->image,"psd:layer.x",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",
    (double) layer_info->page.y);
  (void) SetImageArtifact(layer_info->image,"psd:layer.y",message);
  (void) FormatLocaleString(message,MaxTextExtent,"%.20g",(double)
    layer_info->opacity);
  (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message);
  (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name);
  status=MagickTrue;
  for (j=0; j < (ssize_t) layer_info->channels; j++)
  {
    if (image->debug != MagickFalse)
      (void) LogMagickEvent(CoderEvent,GetMagickModule(),
        " reading data for channel %.20g",(double) j);
    /* Each channel carries its own 2-byte compression marker. */
    compression=(PSDCompressionType) ReadBlobShort(layer_info->image);
    layer_info->image->compression=ConvertPSDCompression(compression);
    if (layer_info->channel_info[j].type == -1)
      layer_info->image->matte=MagickTrue;
    status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j,
      compression,exception);
    InheritException(exception,&layer_info->image->exception);
    if (status == MagickFalse)
      break;
  }
  if (status != MagickFalse)
    status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity,
      MagickFalse,exception);
  if ((status != MagickFalse) &&
      (layer_info->image->colorspace == CMYKColorspace))
    status=NegateImage(layer_info->image,MagickFalse);
  if (status != MagickFalse && layer_info->mask.image != (Image *) NULL)
    {
      const char
        *option;
      layer_info->mask.image->page.x=layer_info->mask.page.x;
      layer_info->mask.image->page.y=layer_info->mask.page.y;
      /* Do not composite the mask when it is disabled */
      if ((layer_info->mask.flags & 0x02) == 0x02)
        layer_info->mask.image->compose=NoCompositeOp;
      else
        status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image,
          layer_info->mask.background == 0 ? 0 : QuantumRange,MagickFalse,
          exception);
      option=GetImageOption(image_info,"psd:preserve-opacity-mask");
      if (IsStringTrue(option) != MagickFalse)
        PreservePSDOpacityMask(image,layer_info,exception);
      layer_info->mask.image=DestroyImage(layer_info->mask.image);
    }
  return(status);
}
/*
  ReadPSDLayers() parses the PSD "layer and mask information" section and,
  unless skip_layers is set, appends one decoded Image per layer to the list
  headed by 'image'.

  Parameters:
    image       - head of the image list; layer images are linked after it.
    image_info  - caller options (ping mode, "psd:preserve-opacity-mask", ...).
    psd_info    - the already-parsed PSD file header.
    skip_layers - when MagickTrue the caller only needs alpha/presence info;
                  layer records are not decoded.
    exception   - receives error details.

  Returns MagickTrue on success (including "no layers present"), MagickFalse
  on a read or decode failure.
*/
ModuleExport MagickBooleanType ReadPSDLayers(Image *image,
  const ImageInfo *image_info,const PSDInfo *psd_info,
  const MagickBooleanType skip_layers,ExceptionInfo *exception)
{
  char
    type[4];

  LayerInfo
    *layer_info;

  MagickSizeType
    size;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    count,
    j,
    number_layers;

  size=GetPSDSize(psd_info,image);
  if (size == 0)
    {
      /*
        Skip layers & masks.
      */
      (void) ReadBlobLong(image);
      count=ReadBlob(image,4,(unsigned char *) type);
      ReversePSDString(image,type,4);
      status=MagickFalse;
      if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
        return(MagickTrue);
      else
        {
          count=ReadBlob(image,4,(unsigned char *) type);
          ReversePSDString(image,type,4);
          /* "Lr16" introduces a 16-bit layer section; anything else ends
             the scan. */
          if ((count != 0) && (LocaleNCompare(type,"Lr16",4) == 0))
            size=GetPSDSize(psd_info,image);
          else
            return(MagickTrue);
        }
    }
  status=MagickTrue;
  if (size != 0)
    {
      layer_info=(LayerInfo *) NULL;
      number_layers=(short) ReadBlobShort(image);
      if (number_layers < 0)
        {
          /*
            The first alpha channel in the merged result contains the
            transparency data for the merged result.
          */
          number_layers=MagickAbsoluteValue(number_layers);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " negative layer count corrected for");
          image->matte=MagickTrue;
        }
      /*
        We only need to know if the image has an alpha channel
      */
      if (skip_layers != MagickFalse)
        return(MagickTrue);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image contains %.20g layers",(double) number_layers);
      if (number_layers == 0)
        ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers",
          image->filename);
      layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers,
        sizeof(*layer_info));
      if (layer_info == (LayerInfo *) NULL)
        {
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " allocation of LayerInfo failed");
          ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
            image->filename);
        }
      (void) ResetMagickMemory(layer_info,0,(size_t) number_layers*
        sizeof(*layer_info));
      /*
        Pass 1: read each layer record (geometry, channel table, blend key,
        mask info, name, and trailing "additional info").
      */
      for (i=0; i < number_layers; i++)
      {
        ssize_t
          x,
          y;

        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " reading layer #%.20g",(double) i+1);
        /* Layer bounds are stored top,left,bottom,right. */
        layer_info[i].page.y=ReadBlobSignedLong(image);
        layer_info[i].page.x=ReadBlobSignedLong(image);
        y=ReadBlobSignedLong(image);
        x=ReadBlobSignedLong(image);
        layer_info[i].page.width=(size_t) (x-layer_info[i].page.x);
        layer_info[i].page.height=(size_t) (y-layer_info[i].page.y);
        layer_info[i].channels=ReadBlobShort(image);
        if (layer_info[i].channels > MaxPSDChannels)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded",
              image->filename);
          }
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g",
            (double) layer_info[i].page.x,(double) layer_info[i].page.y,
            (double) layer_info[i].page.height,(double)
            layer_info[i].page.width,(double) layer_info[i].channels);
        /* Per-channel type id and compressed data length. */
        for (j=0; j < (ssize_t) layer_info[i].channels; j++)
        {
          layer_info[i].channel_info[j].type=(short) ReadBlobShort(image);
          layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info,
            image);
          if (image->debug != MagickFalse)
            (void) LogMagickEvent(CoderEvent,GetMagickModule(),
              " channel[%.20g]: type=%.20g, size=%.20g",(double) j,
              (double) layer_info[i].channel_info[j].type,
              (double) layer_info[i].channel_info[j].size);
        }
        count=ReadBlob(image,4,(unsigned char *) type);
        ReversePSDString(image,type,4);
        if ((count == 0) || (LocaleNCompare(type,"8BIM",4) != 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer type was %.4s instead of 8BIM", type);
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            ThrowBinaryException(CorruptImageError,"ImproperImageHeader",
              image->filename);
          }
        (void) ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey);
        ReversePSDString(image,layer_info[i].blendkey,4);
        layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char)
          ReadBlobByte(image));
        layer_info[i].clipping=(unsigned char) ReadBlobByte(image);
        layer_info[i].flags=(unsigned char) ReadBlobByte(image);
        /* Flag bit 0x02 means "hidden" in the PSD layer record. */
        layer_info[i].visible=!(layer_info[i].flags & 0x02);
        if (image->debug != MagickFalse)
          (void) LogMagickEvent(CoderEvent,GetMagickModule(),
            " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s",
            layer_info[i].blendkey,(double) layer_info[i].opacity,
            layer_info[i].clipping ? "true" : "false",layer_info[i].flags,
            layer_info[i].visible ? "true" : "false");
        (void) ReadBlobByte(image);  /* filler */
        size=ReadBlobLong(image);
        if (size != 0)
          {
            MagickSizeType
              combined_length,  /* bytes consumed so far of 'size' */
              length;

            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer contains additional info");
            length=ReadBlobLong(image);
            combined_length=length+4;
            if (length != 0)
              {
                /*
                  Layer mask info.
                */
                layer_info[i].mask.page.y=ReadBlobSignedLong(image);
                layer_info[i].mask.page.x=ReadBlobSignedLong(image);
                layer_info[i].mask.page.height=(size_t) (ReadBlobLong(image)-
                  layer_info[i].mask.page.y);
                layer_info[i].mask.page.width=(size_t) (ReadBlobLong(image)-
                  layer_info[i].mask.page.x);
                layer_info[i].mask.background=(unsigned char) ReadBlobByte(
                  image);
                layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image);
                /* Flag bit 0x01: mask position is relative to the layer. */
                if (!(layer_info[i].mask.flags & 0x01))
                  {
                    layer_info[i].mask.page.y=layer_info[i].mask.page.y-
                      layer_info[i].page.y;
                    layer_info[i].mask.page.x=layer_info[i].mask.page.x-
                      layer_info[i].page.x;
                  }
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g",
                    (double) layer_info[i].mask.page.x,(double)
                    layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,
                    (double) layer_info[i].mask.page.height,(double)
                    ((MagickOffsetType) length)-18);
                /*
                  Skip over the rest of the layer mask information.
                  NOTE(review): 'length' is unsigned; if a corrupt file
                  declares length < 18, (length-18) wraps to a huge discard
                  count — confirm upstream bounds check.
                */
                if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile",
                      image->filename);
                  }
              }
            length=ReadBlobLong(image);
            combined_length+=length+4;
            if (length != 0)
              {
                /*
                  Layer blending ranges info.
                */
                if (image->debug != MagickFalse)
                  (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                    " layer blending ranges: length=%.20g",(double)
                    ((MagickOffsetType) length));
                /*
                  We read it, but don't use it...
                */
                for (j=0; j < (ssize_t) length; j+=8)
                {
                  size_t blend_source=ReadBlobLong(image);
                  size_t blend_dest=ReadBlobLong(image);
                  if (image->debug != MagickFalse)
                    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                      " source(%x), dest(%x)",(unsigned int)
                      blend_source,(unsigned int) blend_dest);
                }
              }
            /*
              Layer name (Pascal string: one length byte + text, padded to a
              multiple of 4 including the length byte).
            */
            length=(MagickSizeType) (unsigned char) ReadBlobByte(image);
            combined_length+=length+1;
            if (length > 0)
              (void) ReadBlob(image,(size_t) length++,layer_info[i].name);
            layer_info[i].name[length]='\0';
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer name: %s",layer_info[i].name);
            if ((length % 4) != 0)
              {
                length=4-(length % 4);
                combined_length+=length;
                /* Skip over the padding of the layer name */
                if (DiscardBlobBytes(image,length) == MagickFalse)
                  {
                    layer_info=DestroyLayerInfo(layer_info,number_layers);
                    ThrowBinaryException(CorruptImageError,
                      "UnexpectedEndOfFile",image->filename);
                  }
              }
            /*
              Whatever is left of this record is opaque "additional info";
              it is preserved verbatim in a StringInfo for later use as the
              "psd:additional-info" profile.
              NOTE(review): 'length' is unsigned, so 'size-combined_length'
              wraps if a corrupt record over-reports combined_length —
              confirm upstream bounds check.
            */
            length=(MagickSizeType) size-combined_length;
            if (length > 0)
              {
                unsigned char
                  *info;

                layer_info[i].info=AcquireStringInfo((const size_t) length);
                info=GetStringInfoDatum(layer_info[i].info);
                (void) ReadBlob(image,(const size_t) length,info);
              }
          }
      }
      /*
        Pass 2: allocate a canvas for every non-empty layer and attach its
        additional-info profile.
      */
      for (i=0; i < number_layers; i++)
      {
        if ((layer_info[i].page.width == 0) ||
            (layer_info[i].page.height == 0))
          {
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " layer data is empty");
            if (layer_info[i].info != (StringInfo *) NULL)
              layer_info[i].info=DestroyStringInfo(layer_info[i].info);
            continue;
          }
        /*
          Allocate layered image.
        */
        layer_info[i].image=CloneImage(image,layer_info[i].page.width,
          layer_info[i].page.height,MagickFalse,exception);
        if (layer_info[i].image == (Image *) NULL)
          {
            layer_info=DestroyLayerInfo(layer_info,number_layers);
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " allocation of image for layer %.20g failed",(double) i);
            ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
              image->filename);
          }
        if (layer_info[i].info != (StringInfo *) NULL)
          {
            (void) SetImageProfile(layer_info[i].image,"psd:additional-info",
              layer_info[i].info);
            layer_info[i].info=DestroyStringInfo(layer_info[i].info);
          }
      }
      /*
        Pass 3: decode pixel data (skipped in ping mode); empty layers have
        their channel data discarded to keep the stream position in sync.
      */
      if (image_info->ping == MagickFalse)
        {
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                for (j=0; j < layer_info[i].channels; j++)
                {
                  if (DiscardBlobBytes(image,(MagickSizeType)
                      layer_info[i].channel_info[j].size) == MagickFalse)
                    {
                      layer_info=DestroyLayerInfo(layer_info,number_layers);
                      ThrowBinaryException(CorruptImageError,
                        "UnexpectedEndOfFile",image->filename);
                    }
                }
                continue;
              }
            if (image->debug != MagickFalse)
              (void) LogMagickEvent(CoderEvent,GetMagickModule(),
                " reading data for layer %.20g",(double) i);
            status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i],
              exception);
            if (status == MagickFalse)
              break;
            status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType)
              number_layers);
            if (status == MagickFalse)
              break;
          }
        }
      if (status != MagickFalse)
        {
          /* Compact away empty slots, then link the surviving layer images
             into the caller's image list. */
          for (i=0; i < number_layers; i++)
          {
            if (layer_info[i].image == (Image *) NULL)
              {
                for (j=i; j < number_layers - 1; j++)
                  layer_info[j] = layer_info[j+1];
                number_layers--;
                i--;
              }
          }
          if (number_layers > 0)
            {
              for (i=0; i < number_layers; i++)
              {
                if (i > 0)
                  layer_info[i].image->previous=layer_info[i-1].image;
                if (i < (number_layers-1))
                  layer_info[i].image->next=layer_info[i+1].image;
                layer_info[i].image->page=layer_info[i].page;
              }
              image->next=layer_info[0].image;
              layer_info[0].image->previous=image;
            }
          layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info);
        }
      else
        layer_info=DestroyLayerInfo(layer_info,number_layers);
    }
  return(status);
}
/*
  ReadPSDMergedImage() decodes the precombined (composite) image data that
  follows the layer section.  Only Raw and RLE compression are accepted;
  other schemes produce a warning and MagickFalse.
*/
static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info,
  Image* image,const PSDInfo* psd_info,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickOffsetType
    *scanline_sizes;

  PSDCompressionType
    compression;

  register ssize_t
    channel;

  /* A 2-byte compression marker precedes the composite pixel data. */
  compression=(PSDCompressionType) ReadBlobMSBShort(image);
  image->compression=ConvertPSDCompression(compression);
  if ((compression != Raw) && (compression != RLE))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression);
      return(MagickFalse);
    }
  scanline_sizes=(MagickOffsetType *) NULL;
  if (compression == RLE)
    {
      /* RLE data is preceded by one byte-count entry per scanline per
         channel. */
      scanline_sizes=ReadPSDRLESizes(image,psd_info,image->rows*
        psd_info->channels);
      if (scanline_sizes == (MagickOffsetType *) NULL)
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename);
    }
  status=MagickTrue;
  for (channel=0; channel < (ssize_t) psd_info->channels; channel++)
  {
    if (compression != RLE)
      status=ReadPSDChannelRaw(image,psd_info->channels,channel,exception);
    else
      status=ReadPSDChannelRLE(image,psd_info,channel,
        scanline_sizes+(channel*image->rows),exception);
    if (status != MagickFalse)
      status=SetImageProgress(image,LoadImagesTag,channel,psd_info->channels);
    if (status == MagickFalse)
      break;
  }
  /* CMYK samples are stored inverted in PSD files. */
  if ((status != MagickFalse) && (image->colorspace == CMYKColorspace))
    status=NegateImage(image,MagickFalse);
  if (status != MagickFalse)
    status=CorrectPSDAlphaBlend(image_info,image,exception);
  scanline_sizes=(MagickOffsetType *) RelinquishMagickMemory(scanline_sizes);
  return(status);
}
/*
  ReadPSDImage() reads an Adobe Photoshop PSD/PSB file: file header,
  optional colormap, image resource blocks, the layer/mask section (via
  ReadPSDLayers), and finally the precombined (merged) image.  Returns the
  head of the decoded image list, or NULL on failure.
*/
static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    has_merged_image,
    skip_layers;

  MagickOffsetType
    offset;

  MagickSizeType
    length;

  MagickBooleanType
    status;

  PSDInfo
    psd_info;

  register ssize_t
    i;

  ssize_t
    count;

  unsigned char
    *data;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  image=AcquireImage(image_info);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Read image header.  Version 1 is PSD, version 2 is PSB (large document).
  */
  image->endian=MSBEndian;
  count=ReadBlob(image,4,(unsigned char *) psd_info.signature);
  psd_info.version=ReadBlobMSBShort(image);
  if ((count == 0) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) ||
      ((psd_info.version != 1) && (psd_info.version != 2)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  (void) ReadBlob(image,6,psd_info.reserved);
  psd_info.channels=ReadBlobMSBShort(image);
  if (psd_info.channels > MaxPSDChannels)
    ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded");
  psd_info.rows=ReadBlobMSBLong(image);
  psd_info.columns=ReadBlobMSBLong(image);
  /* PSD (version 1) caps dimensions at 30000 pixels. */
  if ((psd_info.version == 1) && ((psd_info.rows > 30000) ||
      (psd_info.columns > 30000)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.depth=ReadBlobMSBShort(image);
  if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  psd_info.mode=ReadBlobMSBShort(image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s",
      (double) psd_info.columns,(double) psd_info.rows,(double)
      psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType)
      psd_info.mode));
  /*
    Initialize image.
  */
  image->depth=psd_info.depth;
  image->columns=psd_info.columns;
  image->rows=psd_info.rows;
  status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  if (SetImageBackgroundColor(image) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  if (psd_info.mode == LabMode)
    SetImageColorspace(image,LabColorspace);
  if (psd_info.mode == CMYKMode)
    {
      SetImageColorspace(image,CMYKColorspace);
      /* A 5th channel beyond CMYK is treated as alpha. */
      image->matte=psd_info.channels > 4 ? MagickTrue : MagickFalse;
    }
  else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) ||
           (psd_info.mode == DuotoneMode))
    {
      status=AcquireImageColormap(image,psd_info.depth != 16 ? 256 : 65536);
      if (status == MagickFalse)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " Image colormap allocated");
      SetImageColorspace(image,GRAYColorspace);
      image->matte=psd_info.channels > 1 ? MagickTrue : MagickFalse;
    }
  else
    image->matte=psd_info.channels > 3 ? MagickTrue : MagickFalse;
  /*
    Read PSD raster colormap only present for indexed and duotone images.
  */
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading colormap");
      if (psd_info.mode == DuotoneMode)
        {
          /*
            Duotone image data; the format of this data is undocumented.
          */
          data=(unsigned char *) AcquireQuantumMemory((size_t) length,
            sizeof(*data));
          if (data == (unsigned char *) NULL)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          (void) ReadBlob(image,(size_t) length,data);
          data=(unsigned char *) RelinquishMagickMemory(data);
        }
      else
        {
          size_t
            number_colors;

          /*
            Read PSD raster colormap (planar: all reds, then greens, then
            blues).
          */
          number_colors=length/3;
          if (number_colors > 65536)
            ThrowReaderException(CorruptImageError,"ImproperImageHeader");
          if (AcquireImageColormap(image,number_colors) == MagickFalse)
            ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].red=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].green=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          for (i=0; i < (ssize_t) image->colors; i++)
            image->colormap[i].blue=ScaleCharToQuantum((unsigned char)
              ReadBlobByte(image));
          image->matte=MagickFalse;
        }
    }
  if ((image->depth == 1) && (image->storage_class != PseudoClass))
    ThrowReaderException(CorruptImageError, "ImproperImageHeader");
  has_merged_image=MagickTrue;
  length=ReadBlobMSBLong(image);
  if (length != 0)
    {
      unsigned char
        *blocks;

      /*
        Image resources block.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " reading image resource blocks - %.20g bytes",(double)
          ((MagickOffsetType) length));
      blocks=(unsigned char *) AcquireQuantumMemory((size_t) length,
        sizeof(*blocks));
      if (blocks == (unsigned char *) NULL)
        ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
      count=ReadBlob(image,(size_t) length,blocks);
      if ((count != (ssize_t) length) || (length < 4) ||
          (LocaleNCompare((char *) blocks,"8BIM",4) != 0))
        {
          blocks=(unsigned char *) RelinquishMagickMemory(blocks);
          ThrowReaderException(CorruptImageError,"ImproperImageHeader");
        }
      /* May clear has_merged_image if the resources say there is none. */
      ParseImageResourceBlocks(image,blocks,(size_t) length,&has_merged_image);
      blocks=(unsigned char *) RelinquishMagickMemory(blocks);
    }
  /*
    Layer and mask block.
  */
  length=GetPSDSize(&psd_info,image);
  if (length == 8)
    {
      length=ReadBlobMSBLong(image);
      length=ReadBlobMSBLong(image);
    }
  offset=TellBlob(image);
  skip_layers=MagickFalse;
  /* When only the first scene is wanted and a merged image exists, the
     individual layers need not be decoded. */
  if ((image_info->number_scenes == 1) && (image_info->scene == 0) &&
      (has_merged_image != MagickFalse))
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " read composite only");
      skip_layers=MagickTrue;
    }
  if (length == 0)
    {
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CoderEvent,GetMagickModule(),
          " image has no layers");
    }
  else
    {
      if (ReadPSDLayers(image,image_info,&psd_info,skip_layers,exception) !=
          MagickTrue)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
      /*
        Skip the rest of the layer and mask information.
      */
      SeekBlob(image,offset+length,SEEK_SET);
    }
  /*
    If we are only "pinging" the image, then we're done - so return.
  */
  if (image_info->ping != MagickFalse)
    {
      (void) CloseBlob(image);
      return(GetFirstImageInList(image));
    }
  /*
    Read the precombined layer, present for PSD < 4 compatibility.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(CoderEvent,GetMagickModule(),
      " reading the precombined layer");
  if (has_merged_image != MagickFalse || GetImageListLength(image) == 1)
    has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image,
      &psd_info,exception);
  /* No merged image and no layers decoded yet: go back and force-read the
     layer section. */
  if ((has_merged_image == MagickFalse) && (GetImageListLength(image) == 1) &&
      (length != 0))
    {
      SeekBlob(image,offset,SEEK_SET);
      status=ReadPSDLayers(image,image_info,&psd_info,MagickFalse,exception);
      if (status != MagickTrue)
        {
          (void) CloseBlob(image);
          image=DestroyImageList(image);
          return((Image *) NULL);
        }
    }
  if (has_merged_image == MagickFalse)
    {
      Image
        *merged;

      /* Synthesize the composite by flattening the decoded layers. */
      if (GetImageListLength(image) == 1)
        ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
      SetImageAlphaChannel(image,TransparentAlphaChannel);
      image->background_color.opacity=TransparentOpacity;
      merged=MergeImageLayers(image,FlattenLayer,exception);
      ReplaceImageInList(&image,merged);
    }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSDImage() adds properties for the PSD image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSDImage method is:
%
% size_t RegisterPSDImage(void)
%
*/
ModuleExport size_t RegisterPSDImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("PSB");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString("Adobe Large Document Format");
entry->module=ConstantString("PSD");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("PSD");
entry->decoder=(DecodeImageHandler *) ReadPSDImage;
entry->encoder=(EncodeImageHandler *) WritePSDImage;
entry->magick=(IsImageFormatHandler *) IsPSD;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString("Adobe Photoshop bitmap");
entry->module=ConstantString("PSD");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSDImage() removes format registrations made by the
% PSD module from the list of supported formats.
%
% The format of the UnregisterPSDImage method is:
%
% UnregisterPSDImage(void)
%
*/
ModuleExport void UnregisterPSDImage(void)
{
(void) UnregisterMagickInfo("PSB");
(void) UnregisterMagickInfo("PSD");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S D I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSDImage() writes an image in the Adobe Photoshop encoded image format.
%
% The format of the WritePSDImage method is:
%
% MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
/*
  SetPSDOffset() writes a scanline-offset field: 16-bit for PSD (version 1),
  32-bit for PSB (version 2).  Returns the byte count written.

  Fix: the version-2 branch cast 'offset' to (unsigned short) before passing
  it to WriteBlobMSBLong, silently truncating any offset >= 65536 in large
  (PSB) documents; the 32-bit write must take an unsigned int.
*/
static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image,
  const size_t offset)
{
  if (psd_info->version == 1)
    return(WriteBlobMSBShort(image,(unsigned short) offset));
  return(WriteBlobMSBLong(image,(unsigned int) offset));
}
/*
  WritePSDOffset() seeks back to 'offset', patches in a previously-reserved
  scanline-offset field (16-bit for version 1, 32-bit for version 2), and
  restores the stream position.  Returns the byte count written.

  Fix: the version-2 branch cast 'size' to (unsigned short) before passing
  it to WriteBlobMSBLong, truncating values >= 65536; the 32-bit write must
  take an unsigned int.
*/
static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    current_offset;

  ssize_t
    result;

  current_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  if (psd_info->version == 1)
    result=WriteBlobMSBShort(image,(unsigned short) size);
  else
    result=WriteBlobMSBLong(image,(unsigned int) size);
  SeekBlob(image,current_offset,SEEK_SET);
  return(result);
}
/*
  SetPSDSize() writes a section-length field: 32-bit for PSD (version 1),
  64-bit for PSB (version 2).  Returns the byte count written.
*/
static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size)
{
  if (psd_info->version != 1)
    return(WriteBlobMSBLongLong(image,size));
  return(WriteBlobMSBLong(image,(unsigned int) size));
}
/*
  WritePSDSize() seeks back to 'offset', patches in a previously-reserved
  section-length field (32-bit for version 1, 64-bit for version 2), and
  restores the stream position.  Returns the byte count written.
*/
static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image,
  const MagickSizeType size,const MagickSizeType offset)
{
  MagickSizeType
    restore_offset;

  ssize_t
    count;

  restore_offset=TellBlob(image);
  SeekBlob(image,offset,SEEK_SET);
  count=(psd_info->version == 1) ?
    WriteBlobMSBLong(image,(unsigned int) size) :
    WriteBlobMSBLongLong(image,size);
  SeekBlob(image,restore_offset,SEEK_SET);
  return(count);
}
/*
  PSDPackbitsEncodeImage() compresses 'length' bytes of 'pixels' into
  'compact_pixels' using Packbits run-length encoding and appends the 0x80
  end-of-data marker.  Returns the number of bytes written to
  compact_pixels (0 with an exception set on allocation failure).
  The caller must size compact_pixels for the worst case (see
  AcquireCompactPixels).
*/
static size_t PSDPackbitsEncodeImage(Image *image,const size_t length,
  const unsigned char *pixels,unsigned char *compact_pixels)
{
  int
    count;

  register ssize_t
    i,
    j;

  register unsigned char
    *q;

  unsigned char
    *packbits;  /* staging buffer for one literal run (max 128 bytes) */

  /*
    Compress pixels with Packbits encoding.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pixels != (unsigned char *) NULL);
  assert(compact_pixels != (unsigned char *) NULL);
  packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits));
  if (packbits == (unsigned char *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  q=compact_pixels;
  /* 'i' counts remaining input bytes; the 1/2/3-byte cases below avoid
     reading past the end of the input in the run-detection look-aheads. */
  for (i=(ssize_t) length; i != 0; )
  {
    switch (i)
    {
      case 1:
      {
        /* Single trailing byte: emit as a literal run of length 1. */
        i--;
        *q++=(unsigned char) 0;
        *q++=(*pixels);
        break;
      }
      case 2:
      {
        /* Two trailing bytes: emit as a literal run of length 2. */
        i-=2;
        *q++=(unsigned char) 1;
        *q++=(*pixels);
        *q++=pixels[1];
        break;
      }
      case 3:
      {
        i-=3;
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /* Three equal bytes: packed run of 3 (count byte 0xFE). */
            *q++=(unsigned char) ((256-3)+1);
            *q++=(*pixels);
            break;
          }
        *q++=(unsigned char) 2;
        *q++=(*pixels);
        *q++=pixels[1];
        *q++=pixels[2];
        break;
      }
      default:
      {
        if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2)))
          {
            /*
              Packed run.
            */
            count=3;
            while (((ssize_t) count < i) && (*pixels == *(pixels+count)))
            {
              count++;
              if (count >= 127)
                break;
            }
            i-=count;
            /* Run of N identical bytes encodes as (257-N), then the byte. */
            *q++=(unsigned char) ((256-count)+1);
            *q++=(*pixels);
            pixels+=count;
            break;
          }
        /*
          Literal run.
        */
        count=0;
        /* Extend the literal until three equal bytes start a packed run. */
        while ((*(pixels+count) != *(pixels+count+1)) ||
               (*(pixels+count+1) != *(pixels+count+2)))
        {
          packbits[count+1]=pixels[count];
          count++;
          if (((ssize_t) count >= (i-3)) || (count >= 127))
            break;
        }
        i-=count;
        /* Literal of N bytes encodes as (N-1), then the bytes. */
        *packbits=(unsigned char) (count-1);
        for (j=0; j <= (ssize_t) count; j++)
          *q++=packbits[j];
        pixels+=count;
        break;
      }
    }
  }
  *q++=(unsigned char) 128;  /* EOD marker */
  packbits=(unsigned char *) RelinquishMagickMemory(packbits);
  return((size_t) (q-compact_pixels));
}
/*
  WriteCompressionStart() emits the 2-byte compression marker for the next
  pixel-data section.  For RLE it also reserves a zeroed scanline-length
  table (one entry per row per channel) that is patched later via
  WritePSDOffset().  Returns the number of bytes written.
*/
static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image,
  const Image *next_image,const ssize_t channels)
{
  size_t
    count;

  ssize_t
    channel,
    row;

  if (next_image->compression == RLECompression)
    {
      count=WriteBlobMSBShort(image,RLE);
      for (channel=0; channel < channels; channel++)
        for (row=0; row < (ssize_t) next_image->rows; row++)
          count+=SetPSDOffset(psd_info,image,0);
      return(count);
    }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    return(WriteBlobMSBShort(image,ZipWithoutPrediction));
#endif
  return(WriteBlobMSBShort(image,Raw));
}
/*
  WritePSDChannel() writes one channel (selected by quantum_type) of
  next_image to the output blob 'image', honoring next_image->compression
  (Raw, RLE via Packbits, or Zip when zlib is available).  When 'separate'
  is set the channel carries its own compression marker and size_offset is
  recomputed locally; otherwise the caller's shared marker/offset table is
  used.  Returns the number of bytes written (0 on allocation/init failure).
*/
static size_t WritePSDChannel(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  const QuantumType quantum_type, unsigned char *compact_pixels,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  int
    y;

  MagickBooleanType
    monochrome;

  QuantumInfo
    *quantum_info;

  register const PixelPacket
    *p;

  register ssize_t
    i;

  size_t
    count,
    length;

  unsigned char
    *pixels;

#ifdef MAGICKCORE_ZLIB_DELEGATE
#define CHUNK 16384

  int
    flush,
    level;

  unsigned char
    *compressed_pixels;

  z_stream
    stream;

  compressed_pixels=(unsigned char *) NULL;
  flush=Z_NO_FLUSH;
#endif
  count=0;
  if (separate != MagickFalse)
    {
      /* +2 skips the compression marker written just below. */
      size_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,1);
    }
  if (next_image->depth > 8)
    next_image->depth=16;
  /* 1-bit data is stored inverted in PSD; detect true bilevel images. */
  monochrome=IsMonochromeImage(image,&image->exception) && (image->depth == 1)
    ? MagickTrue : MagickFalse;
  quantum_info=AcquireQuantumInfo(image_info,next_image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  pixels=GetQuantumPixels(quantum_info);
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK,
        sizeof(*compressed_pixels));
      if (compressed_pixels == (unsigned char *) NULL)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
      ResetMagickMemory(&stream,0,sizeof(stream));
      stream.data_type=Z_BINARY;
      level=Z_DEFAULT_COMPRESSION;
      /* quality 1..9 maps directly onto the zlib compression level. */
      if ((image_info->quality > 0 && image_info->quality < 10))
        level=(int) image_info->quality;
      if (deflateInit(&stream,level) != Z_OK)
        {
          quantum_info=DestroyQuantumInfo(quantum_info);
          return(0);
        }
    }
#endif
  for (y=0; y < (ssize_t) next_image->rows; y++)
  {
    p=GetVirtualPixels(next_image,0,y,next_image->columns,1,&image->exception);
    if (p == (const PixelPacket *) NULL)
      break;
    length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info,
      quantum_type,pixels,&image->exception);
    if (monochrome != MagickFalse)
      for (i=0; i < (ssize_t) length; i++)
        pixels[i]=(~pixels[i]);
    if (next_image->compression == RLECompression)
      {
        length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels);
        count+=WriteBlob(image,length,compact_pixels);
        /* Patch this scanline's byte count into the reserved table. */
        size_offset+=WritePSDOffset(psd_info,image,length,size_offset);
      }
#ifdef MAGICKCORE_ZLIB_DELEGATE
    else if (next_image->compression == ZipCompression)
      {
        stream.avail_in=(uInt) length;
        stream.next_in=(Bytef *) pixels;
        if (y == (ssize_t) next_image->rows-1)
          flush=Z_FINISH;  /* last scanline: flush the deflate stream */
        do {
          stream.avail_out=(uInt) CHUNK;
          stream.next_out=(Bytef *) compressed_pixels;
          if (deflate(&stream,flush) == Z_STREAM_ERROR)
            break;
          length=(size_t) CHUNK-stream.avail_out;
          if (length > 0)
            count+=WriteBlob(image,length,compressed_pixels);
        } while (stream.avail_out == 0);
      }
#endif
    else
      count+=WriteBlob(image,length,pixels);
  }
#ifdef MAGICKCORE_ZLIB_DELEGATE
  if (next_image->compression == ZipCompression)
    {
      (void) deflateEnd(&stream);
      compressed_pixels=(unsigned char *) RelinquishMagickMemory(
        compressed_pixels);
    }
#endif
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(count);
}
/*
  AcquireCompactPixels() allocates a worst-case Packbits output buffer for
  one scanline of 'image'; samples deeper than 8 bits occupy two bytes
  each.  Returns NULL (with the image exception set) on failure.
*/
static unsigned char *AcquireCompactPixels(Image *image)
{
  size_t
    bytes_per_packet;

  unsigned char
    *pixels;

  bytes_per_packet=(image->depth > 8UL) ? 2UL : 1UL;
  pixels=(unsigned char *) AcquireQuantumMemory((9*image->columns)+1,
    bytes_per_packet*sizeof(*pixels));
  if (pixels == (unsigned char *) NULL)
    (void) ThrowMagickException(&image->exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",
      image->filename);
  return(pixels);
}
/*
  WritePSDChannels() writes every channel of next_image (index, gray, or
  R/G/B[/K], plus alpha) to the output blob 'image' via WritePSDChannel().
  When 'separate' is set (layer data) each channel's size is patched back
  at size_offset; otherwise (merged data) a single shared compression
  header and scanline table are written up front.  An opacity-mask image
  registered under the "psd:opacity-mask" artifact is appended last.
  Returns the total number of bytes written (0 on allocation failure).
*/
static ssize_t WritePSDChannels(const PSDInfo *psd_info,
  const ImageInfo *image_info,Image *image,Image *next_image,
  MagickOffsetType size_offset,const MagickBooleanType separate)
{
  Image
    *mask;

  MagickOffsetType
    rows_offset;  /* position of the shared RLE scanline table */

  size_t
    channels,
    count,
    length,
    offset_length;  /* table bytes per channel, advanced between channels */

  unsigned char
    *compact_pixels;

  count=0;
  offset_length=0;
  rows_offset=0;
  compact_pixels=(unsigned char *) NULL;
  if (next_image->compression == RLECompression)
    {
      compact_pixels=AcquireCompactPixels(next_image);
      if (compact_pixels == (unsigned char *) NULL)
        return(0);
    }
  channels=1;
  if (separate == MagickFalse)
    {
      /* Merged data: determine the channel count and write the shared
         compression marker plus (for RLE) the zeroed scanline table. */
      if (next_image->storage_class != PseudoClass)
        {
          if (IsGrayImage(next_image,&next_image->exception) == MagickFalse)
            channels=next_image->colorspace == CMYKColorspace ? 4 : 3;
          if (next_image->matte != MagickFalse)
            channels++;
        }
      rows_offset=TellBlob(image)+2;
      count+=WriteCompressionStart(psd_info,image,next_image,channels);
      offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4));
    }
  size_offset+=2;
  if (next_image->storage_class == PseudoClass)
    {
      length=WritePSDChannel(psd_info,image_info,image,next_image,
        IndexQuantum,compact_pixels,rows_offset,separate);
      if (separate != MagickFalse)
        size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
      else
        rows_offset+=offset_length;
      count+=length;
    }
  else
    {
      if (IsGrayImage(next_image,&next_image->exception) != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GrayQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
      else
        {
          /* CMYK samples are stored inverted in PSD; negate before writing
             and restore (below) afterwards. */
          if (next_image->colorspace == CMYKColorspace)
            (void) NegateImage(next_image,MagickFalse);
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            RedQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            GreenQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            BlueQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
          if (next_image->colorspace == CMYKColorspace)
            {
              length=WritePSDChannel(psd_info,image_info,image,next_image,
                BlackQuantum,compact_pixels,rows_offset,separate);
              if (separate != MagickFalse)
                size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
              else
                rows_offset+=offset_length;
              count+=length;
            }
        }
      if (next_image->matte != MagickFalse)
        {
          length=WritePSDChannel(psd_info,image_info,image,next_image,
            AlphaQuantum,compact_pixels,rows_offset,separate);
          if (separate != MagickFalse)
            size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2;
          else
            rows_offset+=offset_length;
          count+=length;
        }
    }
  compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels);
  /* Undo the CMYK negation applied above. */
  if (next_image->colorspace == CMYKColorspace)
    (void) NegateImage(next_image,MagickFalse);
  if (separate != MagickFalse)
    {
      const char
        *property;

      /* A preserved opacity mask (see PreservePSDOpacityMask) is stored in
         the image registry and written as an extra channel. */
      property=GetImageArtifact(next_image,"psd:opacity-mask");
      if (property != (const char *) NULL)
        {
          mask=(Image *) GetImageRegistry(ImageRegistryType,property,
            &image->exception);
          if (mask != (Image *) NULL)
            {
              if (mask->compression == RLECompression)
                {
                  compact_pixels=AcquireCompactPixels(mask);
                  if (compact_pixels == (unsigned char *) NULL)
                    return(0);
                }
              length=WritePSDChannel(psd_info,image_info,image,mask,
                RedQuantum,compact_pixels,rows_offset,MagickTrue);
              (void) WritePSDSize(psd_info,image,length,size_offset);
              count+=length;
              compact_pixels=(unsigned char *) RelinquishMagickMemory(
                compact_pixels);
            }
        }
    }
  return(count);
}
/*
  Write a Photoshop-style Pascal string: one length byte followed by at
  most 255 characters of `value`, then zero padding so the total byte
  count (including the length byte) is a multiple of `padding`.

  Fix: guard against padding == 0, which made `length % padding` a
  division by zero (undefined behavior).  Behavior for padding > 0 is
  unchanged.

  Returns the number of bytes written to the blob.
*/
static size_t WritePascalString(Image *image,const char *value,size_t padding)
{
  size_t
    count,
    length;

  register ssize_t
    i;

  /*
    Max length is 255.
  */
  count=0;
  length=(strlen(value) > 255UL ) ? 255UL : strlen(value);
  if (length == 0)
    count+=WriteBlobByte(image,0);
  else
    {
      count+=WriteBlobByte(image,(unsigned char) length);
      count+=WriteBlob(image,length,(const unsigned char *) value);
    }
  length++;  /* account for the length byte itself */
  if ((padding == 0) || ((length % padding) == 0))
    return(count);
  for (i=0; i < (ssize_t) (padding-(length % padding)); i++)
    count+=WriteBlobByte(image,0);
  return(count);
}
/*
  Emit the 8BIM resolution resource (id 0x03ED, 16 payload bytes)
  carrying the image's horizontal and vertical resolution as 16.16
  fixed-point pixels-per-inch plus display-unit codes.
*/
static void WriteResolutionResourceBlock(Image *image)
{
  double
    horizontal,
    vertical;

  unsigned short
    unit;

  if (image->units == PixelsPerCentimeterResolution)
    {
      /* convert cm-based resolution to inch-based fixed point */
      horizontal=2.54*65536.0*image->x_resolution+0.5;
      vertical=2.54*65536.0*image->y_resolution+0.5;
      unit=2;
    }
  else
    {
      horizontal=65536.0*image->x_resolution+0.5;
      vertical=65536.0*image->y_resolution+0.5;
      unit=1;
    }
  (void) WriteBlob(image,4,(const unsigned char *) "8BIM");
  (void) WriteBlobMSBShort(image,0x03ED);
  (void) WriteBlobMSBShort(image,0);
  (void) WriteBlobMSBLong(image,16); /* resource size */
  (void) WriteBlobMSBLong(image,(unsigned int) (horizontal+0.5));
  (void) WriteBlobMSBShort(image,unit); /* horizontal resolution unit */
  (void) WriteBlobMSBShort(image,unit); /* width unit */
  (void) WriteBlobMSBLong(image,(unsigned int) (vertical+0.5));
  (void) WriteBlobMSBShort(image,unit); /* vertical resolution unit */
  (void) WriteBlobMSBShort(image,unit); /* height unit */
}
/*
  Write one layer-channel record: the signed channel id followed by a
  zero placeholder size that is patched later via WritePSDSize().
  Returns the number of bytes written.
*/
static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image,
  const signed short channel)
{
  size_t
    written;

  written=WriteBlobMSBSignedShort(image,channel);
  written+=SetPSDSize(psd_info,image,0);
  return(written);
}
/*
  Remove the ICC profile entry (8BIM resource id 0x040F) from the given
  8BIM resource block in place, shifting the following resources down and
  shrinking the StringInfo.  Leaves the block untouched when the entry is
  absent or the recorded size is inconsistent with the block length.
*/
static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
/* walk the resource entries; each needs >= 16 bytes of header room */
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
break;
/* header: 4-byte signature, 2-byte id, 2-byte name, 4-byte size */
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
if (id == 0x0000040f)
{
ssize_t
quantum;
/* padded payload size plus the 12-byte entry header */
quantum=PSDQuantum(count)+12;
if ((quantum >= 12) && (quantum < (ssize_t) length))
{
/* slide the tail of the block over the removed entry */
if ((q+quantum < (datum+length-16)))
(void) CopyMagickMemory(q,q+quantum,length-quantum-(q-datum));
SetStringInfoLength(bim_profile,length-quantum);
}
break;
}
/* resource payloads are padded to an even byte count */
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
/*
  Remove the resolution entry (8BIM resource id 0x03ED) from the given
  8BIM resource block in place, since WriteResolutionResourceBlock()
  emits a fresh one.  Bails out on non-8BIM data or inconsistent sizes.
*/
static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile)
{
register const unsigned char
*p;
size_t
length;
unsigned char
*datum;
unsigned int
count,
long_sans;
unsigned short
id,
short_sans;
length=GetStringInfoLength(bim_profile);
if (length < 16)
return;
datum=GetStringInfoDatum(bim_profile);
/* walk the resource entries; each needs >= 16 bytes of header room */
for (p=datum; (p >= datum) && (p < (datum+length-16)); )
{
register unsigned char
*q;
ssize_t
cnt;
q=(unsigned char *) p;
if (LocaleNCompare((const char *) p,"8BIM",4) != 0)
return;
/* header: 4-byte signature, 2-byte id, 2-byte name, 4-byte size */
p=PushLongPixel(MSBEndian,p,&long_sans);
p=PushShortPixel(MSBEndian,p,&id);
p=PushShortPixel(MSBEndian,p,&short_sans);
p=PushLongPixel(MSBEndian,p,&count);
cnt=PSDQuantum(count);
if (cnt < 0)
return;
if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)))
{
/* slide the tail of the block over the removed entry and shrink */
(void) CopyMagickMemory(q,q+cnt+12,length-(cnt+12)-(q-datum));
SetStringInfoLength(bim_profile,length-(cnt+12));
break;
}
/* resource payloads are padded to an even byte count */
p+=count;
if ((count & 0x01) != 0)
p++;
}
}
/*
  Filter the "psd:additional-info" profile according to the
  "psd:additional-info" image option:
    "all"       - return the profile unmodified;
    "selective" - keep only entries whose 4-byte key is on the Adobe
                  whitelist, compacting the buffer in place;
    otherwise   - drop the profile entirely and return NULL.
  Returns the filtered profile (caller-visible StringInfo) or NULL.
  NOTE(review): after filtering, SetImageProfile() re-attaches a clone of
  `info`, so the returned `profile` appears to be owned by the caller —
  verify it is eventually released.
*/
static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info,
Image *image)
{
#define PSDKeySize 5
#define PSDAllowedLength 36
char
key[PSDKeySize];
/* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */
const char
allowed[PSDAllowedLength][PSDKeySize] = {
"blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk",
"GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr",
"lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl",
"post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA"
},
*option;
const StringInfo
*info;
MagickBooleanType
found;
register size_t
i;
size_t
remaining_length,
length;
StringInfo
*profile;
unsigned char
*p;
unsigned int
size;
info=GetImageProfile(image,"psd:additional-info");
if (info == (const StringInfo *) NULL)
return((const StringInfo *) NULL);
option=GetImageOption(image_info,"psd:additional-info");
if (LocaleCompare(option,"all") == 0)
return(info);
if (LocaleCompare(option,"selective") != 0)
{
profile=RemoveImageProfile(image,"psd:additional-info");
return(DestroyStringInfo(profile));
}
length=GetStringInfoLength(info);
p=GetStringInfoDatum(info);
remaining_length=length;
length=0;
/* each entry: 4-byte signature, 4-byte key, 4-byte big-endian size */
while (remaining_length >= 12)
{
/* skip over signature */
p+=4;
key[0]=(*p++);
key[1]=(*p++);
key[2]=(*p++);
key[3]=(*p++);
key[4]='\0';
size=(unsigned int) (*p++) << 24;
size|=(unsigned int) (*p++) << 16;
size|=(unsigned int) (*p++) << 8;
size|=(unsigned int) (*p++);
size=size & 0xffffffff;
remaining_length-=12;
/* reject entries whose declared size overruns the buffer */
if ((size_t) size > remaining_length)
return((const StringInfo *) NULL);
found=MagickFalse;
for (i=0; i < PSDAllowedLength; i++)
{
if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0)
continue;
found=MagickTrue;
break;
}
remaining_length-=(size_t) size;
if (found == MagickFalse)
{
/* drop this entry: compact the tail over it (p-12 is entry start) */
if (remaining_length > 0)
p=(unsigned char *) CopyMagickMemory(p-12,p+size,remaining_length);
continue;
}
/* keep this entry; account for header + payload in the kept length */
length+=(size_t) size+12;
p+=size;
}
profile=RemoveImageProfile(image,"psd:additional-info");
if (length == 0)
return(DestroyStringInfo(profile));
SetStringInfoLength(profile,(const size_t) length);
SetImageProfile(image,"psd:additional-info",info);
return(profile);
}
/*
  Write `image` (and any following images in the list as layers) in
  Adobe Photoshop PSD format, or PSB (version 2) when requested via the
  "PSB" magick or when either dimension exceeds 30000 pixels.
  Emits, in order: file header, mode data/colormap, image resources
  (resolution, optional 8BIM and ICC profiles), the layer and mask
  information section, per-layer channel data, and finally the composite
  (merged) image data.  Returns MagickTrue on success.
*/
static MagickBooleanType WritePSDImage(const ImageInfo *image_info,
Image *image)
{
char
layer_name[MaxTextExtent];
const char
*property;
const StringInfo
*icc_profile,
*info;
Image
*base_image,
*next_image;
MagickBooleanType
status;
MagickOffsetType
*layer_size_offsets,
size_offset;
PSDInfo
psd_info;
register ssize_t
i;
size_t
layer_count,
layer_index,
length,
name_length,
num_channels,
packet_size,
rounded_size,
size;
StringInfo
*bim_profile;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
/* bytes per pixel: 3 or 6 for RGB, +1/+2 with an alpha channel */
packet_size=(size_t) (image->depth > 8 ? 6 : 3);
if (image->matte != MagickFalse)
packet_size+=image->depth > 8 ? 2 : 1;
psd_info.version=1;
if ((LocaleCompare(image_info->magick,"PSB") == 0) ||
(image->columns > 30000) || (image->rows > 30000))
psd_info.version=2;
/*
File header: signature, version, reserved bytes, channel count, size.
*/
(void) WriteBlob(image,4,(const unsigned char *) "8BPS");
(void) WriteBlobMSBShort(image,psd_info.version); /* version */
for (i=1; i <= 6; i++)
(void) WriteBlobByte(image, 0); /* 6 bytes of reserved */
if (SetImageGray(image,&image->exception) != MagickFalse)
num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
else
if ((image_info->type != TrueColorType) && (image_info->type !=
TrueColorMatteType) && (image->storage_class == PseudoClass))
num_channels=(image->matte != MagickFalse ? 2UL : 1UL);
else
{
if (image->storage_class == PseudoClass)
(void) SetImageStorageClass(image,DirectClass);
if (image->colorspace != CMYKColorspace)
num_channels=(image->matte != MagickFalse ? 4UL : 3UL);
else
num_channels=(image->matte != MagickFalse ? 5UL : 4UL);
}
(void) WriteBlobMSBShort(image,(unsigned short) num_channels);
(void) WriteBlobMSBLong(image,(unsigned int) image->rows);
(void) WriteBlobMSBLong(image,(unsigned int) image->columns);
if (IsGrayImage(image,&image->exception) != MagickFalse)
{
MagickBooleanType
monochrome;
/*
Write depth & mode.
*/
monochrome=IsMonochromeImage(image,&image->exception) &&
(image->depth == 1) ? MagickTrue : MagickFalse;
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8));
(void) WriteBlobMSBShort(image,(unsigned short)
(monochrome != MagickFalse ? BitmapMode : GrayscaleMode));
}
else
{
(void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class ==
PseudoClass ? 8 : image->depth > 8 ? 16 : 8));
if (((image_info->colorspace != UndefinedColorspace) ||
(image->colorspace != CMYKColorspace)) &&
(image_info->colorspace != CMYKColorspace))
{
(void) TransformImageColorspace(image,sRGBColorspace);
(void) WriteBlobMSBShort(image,(unsigned short)
(image->storage_class == PseudoClass ? IndexedMode : RGBMode));
}
else
{
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,CMYKColorspace);
(void) WriteBlobMSBShort(image,CMYKMode);
}
}
if ((IsGrayImage(image,&image->exception) != MagickFalse) ||
(image->storage_class == DirectClass) || (image->colors > 256))
(void) WriteBlobMSBLong(image,0);
else
{
/*
Write PSD raster colormap.
*/
(void) WriteBlobMSBLong(image,768);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(
image->colormap[i].green));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
for (i=0; i < (ssize_t) image->colors; i++)
(void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue));
for ( ; i < 256; i++)
(void) WriteBlobByte(image,0);
}
/*
Image resource block.
*/
length=28; /* 0x03EB */
bim_profile=(StringInfo *) GetImageProfile(image,"8bim");
icc_profile=GetImageProfile(image,"icc");
if (bim_profile != (StringInfo *) NULL)
{
/* work on a clone; duplicate ICC/resolution entries are stripped */
bim_profile=CloneStringInfo(bim_profile);
if (icc_profile != (StringInfo *) NULL)
RemoveICCProfileFromResourceBlock(bim_profile);
RemoveResolutionFromResourceBlock(bim_profile);
length+=PSDQuantum(GetStringInfoLength(bim_profile));
}
if (icc_profile != (const StringInfo *) NULL)
length+=PSDQuantum(GetStringInfoLength(icc_profile))+12;
(void) WriteBlobMSBLong(image,(unsigned int) length);
WriteResolutionResourceBlock(image);
if (bim_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,GetStringInfoLength(bim_profile),
GetStringInfoDatum(bim_profile));
bim_profile=DestroyStringInfo(bim_profile);
}
if (icc_profile != (StringInfo *) NULL)
{
(void) WriteBlob(image,4,(const unsigned char *) "8BIM");
(void) WriteBlobMSBShort(image,0x0000040F);
(void) WriteBlobMSBShort(image,0);
(void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength(
icc_profile));
(void) WriteBlob(image,GetStringInfoLength(icc_profile),
GetStringInfoDatum(icc_profile));
/* pad ICC data to an even length */
if ((MagickOffsetType) GetStringInfoLength(icc_profile) !=
PSDQuantum(GetStringInfoLength(icc_profile)))
(void) WriteBlobByte(image,0);
}
/*
Layer and mask information: layers come from the rest of the list, or
the single image itself when the list has only one entry.
*/
base_image=GetNextImageInList(image);
if (base_image == (Image *)NULL)
base_image=image;
size=0;
size_offset=TellBlob(image);
/* two placeholder lengths, patched near the end via WritePSDSize() */
SetPSDSize(&psd_info,image,0);
SetPSDSize(&psd_info,image,0);
layer_count=0;
for (next_image=base_image; next_image != NULL; )
{
layer_count++;
next_image=GetNextImageInList(next_image);
}
/* a negative layer count signals that the first alpha is merged data */
if (image->matte != MagickFalse)
size+=WriteBlobMSBShort(image,-(unsigned short) layer_count);
else
size+=WriteBlobMSBShort(image,(unsigned short) layer_count);
layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory(
(size_t) layer_count,sizeof(MagickOffsetType));
if (layer_size_offsets == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
layer_index=0;
/* one layer record per image in the list */
for (next_image=base_image; next_image != NULL; )
{
Image
*mask;
unsigned char
default_color;
unsigned short
channels,
total_channels;
mask=(Image *) NULL;
property=GetImageArtifact(next_image,"psd:opacity-mask");
default_color=0;
if (property != (const char *) NULL)
{
mask=(Image *) GetImageRegistry(ImageRegistryType,property,
&image->exception);
default_color=strlen(property) == 9 ? 255 : 0;
}
/* layer bounding box: top, left, bottom, right */
size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.y);
size+=WriteBlobMSBLong(image,(unsigned int) next_image->page.x);
size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.y+
next_image->rows));
size+=WriteBlobMSBLong(image,(unsigned int) (next_image->page.x+
next_image->columns));
channels=1U;
if ((next_image->storage_class != PseudoClass) &&
(IsGrayImage(next_image,&next_image->exception) == MagickFalse))
channels=next_image->colorspace == CMYKColorspace ? 4U : 3U;
total_channels=channels;
if (next_image->matte != MagickFalse)
total_channels++;
if (mask != (Image *) NULL)
total_channels++;
size+=WriteBlobMSBShort(image,total_channels);
/* remember where the per-channel sizes live so they can be patched */
layer_size_offsets[layer_index++]=TellBlob(image);
for (i=0; i < (ssize_t) channels; i++)
size+=WriteChannelSize(&psd_info,image,(signed short) i);
if (next_image->matte != MagickFalse)
size+=WriteChannelSize(&psd_info,image,-1);
if (mask != (Image *) NULL)
size+=WriteChannelSize(&psd_info,image,-2);
size+=WriteBlob(image,4,(const unsigned char *) "8BIM");
size+=WriteBlob(image,4,(const unsigned char *)
CompositeOperatorToPSDBlendMode(next_image->compose));
property=GetImageArtifact(next_image,"psd:layer.opacity");
if (property != (const char *) NULL)
{
Quantum
opacity;
opacity=(Quantum) StringToInteger(property);
size+=WriteBlobByte(image,ScaleQuantumToChar(opacity));
(void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,
&image->exception);
}
else
size+=WriteBlobByte(image,255);
size+=WriteBlobByte(image,0);
size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ?
1 << 0x02 : 1); /* layer properties - visible, etc. */
size+=WriteBlobByte(image,0);
/* NOTE(review): `info` is re-attached as a clone inside
GetAdditionalInformation; confirm the returned StringInfo is freed */
info=GetAdditionalInformation(image_info,next_image);
property=(const char *) GetImageProperty(next_image,"label");
if (property == (const char *) NULL)
{
(void) FormatLocaleString(layer_name,MaxTextExtent,"L%.20g",
(double) layer_index);
property=layer_name;
}
/* extra-data length: padded name + mask record + additional info */
name_length=strlen(property)+1;
if ((name_length % 4) != 0)
name_length+=(4-(name_length % 4));
if (info != (const StringInfo *) NULL)
name_length+=GetStringInfoLength(info);
name_length+=8;
if (mask != (Image *) NULL)
name_length+=20;
size+=WriteBlobMSBLong(image,(unsigned int) name_length);
if (mask == (Image *) NULL)
size+=WriteBlobMSBLong(image,0);
else
{
if (mask->compose != NoCompositeOp)
(void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum(
default_color),MagickTrue,&image->exception);
mask->page.y+=image->page.y;
mask->page.x+=image->page.x;
size+=WriteBlobMSBLong(image,20);
size+=WriteBlobMSBSignedLong(image,mask->page.y);
size+=WriteBlobMSBSignedLong(image,mask->page.x);
size+=WriteBlobMSBLong(image,mask->rows+mask->page.y);
size+=WriteBlobMSBLong(image,mask->columns+mask->page.x);
size+=WriteBlobByte(image,default_color);
size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0);
size+=WriteBlobMSBShort(image,0);
}
size+=WriteBlobMSBLong(image,0);
size+=WritePascalString(image,property,4);
if (info != (const StringInfo *) NULL)
size+=WriteBlob(image,GetStringInfoLength(info),
GetStringInfoDatum(info));
next_image=GetNextImageInList(next_image);
}
/*
Now the image data!
*/
next_image=base_image;
layer_index=0;
while (next_image != NULL)
{
length=WritePSDChannels(&psd_info,image_info,image,next_image,
layer_size_offsets[layer_index++],MagickTrue);
if (length == 0)
{
status=MagickFalse;
break;
}
size+=length;
next_image=GetNextImageInList(next_image);
}
(void) WriteBlobMSBLong(image,0); /* user mask data */
/*
Remove the opacity mask from the registry
*/
next_image=base_image;
while (next_image != (Image *) NULL)
{
property=GetImageArtifact(next_image,"psd:opacity-mask");
if (property != (const char *) NULL)
DeleteImageRegistry(property);
next_image=GetNextImageInList(next_image);
}
/*
Write the total size
*/
size_offset+=WritePSDSize(&psd_info,image,size+
(psd_info.version == 1 ? 8 : 16),size_offset);
/* layer-info length must be rounded up to an even value */
if ((size/2) != ((size+1)/2))
rounded_size=size+1;
else
rounded_size=size;
(void) WritePSDSize(&psd_info,image,rounded_size,size_offset);
layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory(
layer_size_offsets);
/*
Write composite image.
*/
if (status != MagickFalse)
{
CompressionType
compression;
compression=image->compression;
/* zip compression is not supported for the composite; fall back */
if (image->compression == ZipCompression)
image->compression=RLECompression;
if (WritePSDChannels(&psd_info,image_info,image,image,0,
MagickFalse) == 0)
status=MagickFalse;
image->compression=compression;
}
(void) CloseBlob(image);
return(status);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3194_0 |
crossvul-cpp_data_bad_3253_0 | /* radare2 - LGPL - Copyright 2017 - wargio */
#include <stdlib.h>
#include <string.h>
#include <r_util.h>
#include <r_types.h>
#include "r_x509_internal.h"
#include "r_pkcs7_internal.h"
/* Parse a SEQUENCE of CertificateRevocationList children into crls.
 * Returns false on invalid arguments or allocation failure.
 * BUG FIX: the guard used `!crls && !object`, which only rejected the
 * case where BOTH pointers were NULL; a single NULL argument was then
 * dereferenced below (NULL-pointer dereference, CWE-476). */
bool r_pkcs7_parse_certificaterevocationlists (RPKCS7CertificateRevocationLists *crls, RASN1Object *object) {
	ut32 i;
	if (!crls || !object) {
		return false;
	}
	if (object->list.length > 0) {
		crls->elements = (RX509CertificateRevocationList **) calloc (object->list.length, sizeof (RX509CertificateRevocationList*));
		if (!crls->elements) {
			return false;
		}
		crls->length = object->list.length;
		for (i = 0; i < crls->length; ++i) {
			crls->elements[i] = r_x509_parse_crl (object->list.objects[i]);
		}
	}
	return true;
}
/* Release every CRL owned by crls plus the element array itself.
 * crls is embedded in its parent structure, so it is not freed here. */
void r_pkcs7_free_certificaterevocationlists (RPKCS7CertificateRevocationLists *crls) {
	ut32 idx;
	if (!crls) {
		return;
	}
	for (idx = 0; idx < crls->length; idx++) {
		r_x509_free_crl (crls->elements[idx]);
		crls->elements[idx] = NULL;
	}
	R_FREE (crls->elements);
}
/* Parse the optional ExtendedCertificatesAndCertificates set into ecac.
 * Returns false on invalid arguments or allocation failure.
 * BUG FIX: the guard used `!ecac && !object`, letting a single NULL
 * argument through to be dereferenced (NULL-pointer dereference). */
bool r_pkcs7_parse_extendedcertificatesandcertificates (RPKCS7ExtendedCertificatesAndCertificates *ecac, RASN1Object *object) {
	ut32 i;
	if (!ecac || !object) {
		return false;
	}
	if (object->list.length > 0) {
		ecac->elements = (RX509Certificate **) calloc (object->list.length, sizeof (RX509Certificate*));
		if (!ecac->elements) {
			return false;
		}
		ecac->length = object->list.length;
		for (i = 0; i < ecac->length; ++i) {
			/* r_x509_parse_certificate takes ownership of the child,
			   hence the slot is cleared afterwards */
			ecac->elements[i] = r_x509_parse_certificate (object->list.objects[i]);
			object->list.objects[i] = NULL;
		}
	}
	return true;
}
/* Release every certificate owned by ecac plus the element array.
 * ecac is embedded in its parent structure, so it is not freed here. */
void r_pkcs7_free_extendedcertificatesandcertificates (RPKCS7ExtendedCertificatesAndCertificates *ecac) {
	ut32 idx;
	if (!ecac) {
		return;
	}
	for (idx = 0; idx < ecac->length; idx++) {
		r_x509_free_certificate (ecac->elements[idx]);
		ecac->elements[idx] = NULL;
	}
	R_FREE (ecac->elements);
}
/* Parse the SET of DigestAlgorithmIdentifier children into dai.
 * Returns false on invalid arguments or allocation failure.
 * BUG FIXES: the guard used `!dai && !object`, letting a single NULL
 * argument through to be dereferenced; malloc+memset replaced with the
 * project's zero-allocating R_NEW0 idiom. */
bool r_pkcs7_parse_digestalgorithmidentifier (RPKCS7DigestAlgorithmIdentifiers *dai, RASN1Object *object) {
	ut32 i;
	if (!dai || !object) {
		return false;
	}
	if (object->list.length > 0) {
		dai->elements = (RX509AlgorithmIdentifier **) calloc (object->list.length, sizeof (RX509AlgorithmIdentifier*));
		if (!dai->elements) {
			return false;
		}
		dai->length = object->list.length;
		for (i = 0; i < dai->length; ++i) {
			/* r_x509_parse_algorithmidentifier fills a caller-provided,
			   zero-initialized struct and tolerates a NULL target, so an
			   allocation failure here is skipped rather than fatal */
			dai->elements[i] = R_NEW0 (RX509AlgorithmIdentifier);
			if (dai->elements[i]) {
				r_x509_parse_algorithmidentifier (dai->elements[i], object->list.objects[i]);
			}
		}
	}
	return true;
}
/* Release each algorithm identifier (members, then the struct we
 * allocated) and the element array.  dai itself is embedded. */
void r_pkcs7_free_digestalgorithmidentifier (RPKCS7DigestAlgorithmIdentifiers *dai) {
	ut32 idx;
	if (!dai) {
		return;
	}
	for (idx = 0; idx < dai->length; idx++) {
		if (dai->elements[idx]) {
			/* frees only the members; the struct is owned here */
			r_x509_free_algorithmidentifier (dai->elements[idx]);
			R_FREE (dai->elements[idx]);
		}
	}
	R_FREE (dai->elements);
}
/* Fill ci from an ASN.1 ContentInfo: the first child is the contentType
 * OID; the optional second child becomes the content (ownership moved). */
bool r_pkcs7_parse_contentinfo (RPKCS7ContentInfo* ci, RASN1Object *object) {
	RASN1Object *oid;
	if (!ci || !object || object->list.length < 1 || !object->list.objects[0]) {
		return false;
	}
	oid = object->list.objects[0];
	ci->contentType = r_asn1_stringify_oid (oid->sector, oid->length);
	if (object->list.length > 1) {
		R_PTR_MOVE (ci->content, object->list.objects[1]);
	}
	return true;
}
/* Free the members of ci; ci itself is embedded in its parent. */
void r_pkcs7_free_contentinfo (RPKCS7ContentInfo* ci) {
	if (!ci) {
		return;
	}
	r_asn1_free_object (ci->content);
	r_asn1_free_string (ci->contentType);
}
/* Parse IssuerAndSerialNumber: child [0] is the issuer Name, child [1]
 * the serial number (ownership of the ASN.1 object is moved). */
bool r_pkcs7_parse_issuerandserialnumber (RPKCS7IssuerAndSerialNumber* iasu, RASN1Object *object) {
	if (iasu && object && object->list.length == 2) {
		r_x509_parse_name (&iasu->issuer, object->list.objects[0]);
		R_PTR_MOVE (iasu->serialNumber, object->list.objects[1]);
		return true;
	}
	return false;
}
/* Free the members of iasu; iasu itself is embedded in its parent. */
void r_pkcs7_free_issuerandserialnumber (RPKCS7IssuerAndSerialNumber* iasu) {
	if (!iasu) {
		return;
	}
	r_x509_free_name (&iasu->issuer);
	r_asn1_free_object (iasu->serialNumber);
}
/*
RX509AlgorithmIdentifier digestEncryptionAlgorithm;
RASN1Object *encryptedDigest;
RASN1Object *unauthenticatedAttributes; //Optional type ??
} RPKCS7SignerInfo;
*/
/* Parse one SignerInfo per RFC 2315: version, issuerAndSerialNumber,
 * digestAlgorithm, [0] authenticatedAttributes?, digestEncryptionAlgorithm,
 * encryptedDigest, [1] unauthenticatedAttributes?.
 * BUG FIX (NULL-pointer hardening): children of a malformed sequence may
 * be NULL and the version INTEGER may have an empty body; the original
 * dereferenced elems[0]->sector[0] and elems[shift]->klass unchecked. */
bool r_pkcs7_parse_signerinfo (RPKCS7SignerInfo* si, RASN1Object *object) {
	RASN1Object **elems;
	ut32 shift = 3;
	if (!si || !object || object->list.length < 5) {
		return false;
	}
	elems = object->list.objects;
	if (!elems[0] || !elems[0]->sector || !elems[0]->length) {
		return false;
	}
	si->version = (ut32) elems[0]->sector[0];
	r_pkcs7_parse_issuerandserialnumber (&si->issuerAndSerialNumber, elems[1]);
	r_x509_parse_algorithmidentifier (&si->digestAlgorithm, elems[2]);
	if (shift < object->list.length && elems[shift] && elems[shift]->klass == CLASS_CONTEXT && elems[shift]->tag == 0) {
		r_pkcs7_parse_attributes (&si->authenticatedAttributes, elems[shift]);
		shift++;
	}
	if (shift < object->list.length) {
		r_x509_parse_algorithmidentifier (&si->digestEncryptionAlgorithm, elems[shift]);
		shift++;
	}
	if (shift < object->list.length) {
		R_PTR_MOVE (si->encryptedDigest, object->list.objects[shift]);
		shift++;
	}
	if (shift < object->list.length && elems[shift] && elems[shift]->klass == CLASS_CONTEXT && elems[shift]->tag == 1) {
		r_pkcs7_parse_attributes (&si->unauthenticatedAttributes, elems[shift]);
	}
	return true;
}
/* Release all members of si, then si itself (heap-allocated by the
 * signerinfos parser). */
void r_pkcs7_free_signerinfo (RPKCS7SignerInfo* si) {
	if (!si) {
		return;
	}
	r_pkcs7_free_issuerandserialnumber (&si->issuerAndSerialNumber);
	r_x509_free_algorithmidentifier (&si->digestAlgorithm);
	r_pkcs7_free_attributes (&si->authenticatedAttributes);
	r_x509_free_algorithmidentifier (&si->digestEncryptionAlgorithm);
	r_asn1_free_object (si->encryptedDigest);
	r_pkcs7_free_attributes (&si->unauthenticatedAttributes);
	free (si);
}
/* Parse the SET of SignerInfo children into ss.
 * Returns false on invalid arguments or allocation failure.
 * BUG FIX: the guard used `!ss && !object`, so a single NULL argument
 * slipped through and was dereferenced (NULL-pointer dereference). */
bool r_pkcs7_parse_signerinfos (RPKCS7SignerInfos *ss, RASN1Object *object) {
	ut32 i;
	if (!ss || !object) {
		return false;
	}
	if (object->list.length > 0) {
		ss->elements = (RPKCS7SignerInfo **) calloc (object->list.length, sizeof (RPKCS7SignerInfo*));
		if (!ss->elements) {
			return false;
		}
		ss->length = object->list.length;
		for (i = 0; i < ss->length; ++i) {
			/* allocate first: the parser fills a caller-provided struct
			   and itself rejects a NULL target */
			ss->elements[i] = R_NEW0 (RPKCS7SignerInfo);
			r_pkcs7_parse_signerinfo (ss->elements[i], object->list.objects[i]);
		}
	}
	return true;
}
/* Release every SignerInfo owned by ss plus the element array.
 * ss is embedded in its parent structure, so it is not freed here. */
void r_pkcs7_free_signerinfos (RPKCS7SignerInfos *ss) {
	ut32 idx;
	if (!ss) {
		return;
	}
	for (idx = 0; idx < ss->length; idx++) {
		r_pkcs7_free_signerinfo (ss->elements[idx]);
		ss->elements[idx] = NULL;
	}
	R_FREE (ss->elements);
}
/* Parse SignedData per RFC 2315: version, digestAlgorithms, contentInfo,
 * [0] certificates?, [1] crls?, signerInfos.
 * BUG FIX (NULL-pointer hardening): the version child, its body, and the
 * optional context-tagged children were dereferenced without NULL checks,
 * crashing on malformed ASN.1 input. */
bool r_pkcs7_parse_signeddata (RPKCS7SignedData *sd, RASN1Object *object) {
	RASN1Object **elems;
	ut32 shift = 3;
	if (!sd || !object || object->list.length < 4) {
		return false;
	}
	memset (sd, 0, sizeof (RPKCS7SignedData));
	elems = object->list.objects;
	if (!elems[0] || !elems[0]->sector || !elems[0]->length) {
		return false;
	}
	sd->version = (ut32) elems[0]->sector[0];
	r_pkcs7_parse_digestalgorithmidentifier (&sd->digestAlgorithms, elems[1]);
	r_pkcs7_parse_contentinfo (&sd->contentInfo, elems[2]);
	/* Optional [0] certificates */
	if (shift < object->list.length && elems[shift] && elems[shift]->klass == CLASS_CONTEXT && elems[shift]->tag == 0) {
		r_pkcs7_parse_extendedcertificatesandcertificates (&sd->certificates, elems[shift]);
		shift++;
	}
	/* Optional [1] crls */
	if (shift < object->list.length && elems[shift] && elems[shift]->klass == CLASS_CONTEXT && elems[shift]->tag == 1) {
		r_pkcs7_parse_certificaterevocationlists (&sd->crls, elems[shift]);
		shift++;
	}
	if (shift < object->list.length) {
		r_pkcs7_parse_signerinfos (&sd->signerinfos, elems[shift]);
	}
	return true;
}
/* Free every member of sd; sd itself is embedded in the RCMS container. */
void r_pkcs7_free_signeddata (RPKCS7SignedData* sd) {
	if (!sd) {
		return;
	}
	r_pkcs7_free_digestalgorithmidentifier (&sd->digestAlgorithms);
	r_pkcs7_free_contentinfo (&sd->contentInfo);
	r_pkcs7_free_extendedcertificatesandcertificates (&sd->certificates);
	r_pkcs7_free_certificaterevocationlists (&sd->crls);
	r_pkcs7_free_signerinfos (&sd->signerinfos);
}
/* Parse a DER-encoded CMS/PKCS#7 blob into a freshly allocated RCMS.
 * Returns NULL on malformed input or allocation failure.
 * BUG FIX (CWE-476): object->list.objects[1] was dereferenced
 * (->list.length) before being checked for NULL, so a malformed blob
 * with a missing second child crashed the parser. */
RCMS *r_pkcs7_parse_cms (const ut8 *buffer, ut32 length) {
	RASN1Object *object;
	RCMS *container;
	if (!buffer || !length) {
		return NULL;
	}
	container = R_NEW0 (RCMS);
	if (!container) {
		return NULL;
	}
	object = r_asn1_create_object (buffer, length);
	if (!object || object->list.length != 2 || !object->list.objects[0] ||
		!object->list.objects[1] || object->list.objects[1]->list.length != 1) {
		r_asn1_free_object (object);
		free (container);
		return NULL;
	}
	container->contentType = r_asn1_stringify_oid (object->list.objects[0]->sector, object->list.objects[0]->length);
	r_pkcs7_parse_signeddata (&container->signedData, object->list.objects[1]->list.objects[0]);
	r_asn1_free_object (object);
	return container;
}
/* Release an RCMS returned by r_pkcs7_parse_cms, members first. */
void r_pkcs7_free_cms (RCMS* container) {
	if (!container) {
		return;
	}
	r_asn1_free_string (container->contentType);
	r_pkcs7_free_signeddata (&container->signedData);
	free (container);
}
/* Build one attribute from an ASN.1 node: child [0] is the OID, the
 * optional child [1] becomes the value (ownership moved).
 * Returns a heap-allocated attribute or NULL. */
RPKCS7Attribute* r_pkcs7_parse_attribute (RASN1Object *object) {
	RPKCS7Attribute* attr;
	RASN1Object *oid;
	if (!object || object->list.length < 1) {
		return NULL;
	}
	attr = R_NEW0 (RPKCS7Attribute);
	if (!attr) {
		return NULL;
	}
	oid = object->list.objects[0];
	if (oid) {
		attr->oid = r_asn1_stringify_oid (oid->sector, oid->length);
	}
	if (object->list.length == 2) {
		R_PTR_MOVE (attr->data, object->list.objects[1]);
	}
	return attr;
}
/* Release one attribute created by r_pkcs7_parse_attribute. */
void r_pkcs7_free_attribute (RPKCS7Attribute* attribute) {
	if (!attribute) {
		return;
	}
	r_asn1_free_object (attribute->data);
	r_asn1_free_string (attribute->oid);
	free (attribute);
}
/* Parse every child of object into attributes->elements.
 * Returns false on invalid arguments or allocation failure.  (The guard
 * already rejects an empty list, so no extra length check is needed.) */
bool r_pkcs7_parse_attributes (RPKCS7Attributes* attributes, RASN1Object *object) {
	ut32 idx;
	if (!attributes || !object || !object->list.length) {
		return false;
	}
	attributes->length = object->list.length;
	attributes->elements = R_NEWS0 (RPKCS7Attribute*, attributes->length);
	if (!attributes->elements) {
		attributes->length = 0;
		return false;
	}
	for (idx = 0; idx < object->list.length; idx++) {
		attributes->elements[idx] = r_pkcs7_parse_attribute (object->list.objects[idx]);
	}
	return true;
}
/* Release each attribute and the element array; attributes itself is
 * embedded in its parent structure. */
void r_pkcs7_free_attributes (RPKCS7Attributes* attributes) {
	ut32 idx;
	if (!attributes) {
		return;
	}
	for (idx = 0; idx < attributes->length; idx++) {
		r_pkcs7_free_attribute (attributes->elements[idx]);
	}
	R_FREE (attributes->elements);
}
/*
 * Render a certificate revocation list as human-readable text into
 * buffer (at most `length` bytes).  Despite the name, the parameter is a
 * CRL, not a SignerInfos set.  Returns a pointer just past the written
 * text, or NULL when the buffer is exhausted or formatting fails.
 */
char* r_pkcs7_signerinfos_dump (RX509CertificateRevocationList *crl, char* buffer, ut32 length, const char* pad) {
RASN1String *algo = NULL, *last = NULL, *next = NULL;
ut32 i, p;
int r;
char *tmp, *pad2, *pad3;
if (!crl || !buffer || !length) {
return NULL;
}
if (!pad) {
pad = "";
}
/* pad3 is the deepest indent; pad2 points two chars into it */
pad3 = r_str_newf ("%s ", pad);
if (!pad3) return NULL;
pad2 = pad3 + 2;
algo = crl->signature.algorithm;
last = crl->lastUpdate;
next = crl->nextUpdate;
r = snprintf (buffer, length, "%sCRL:\n%sSignature:\n%s%s\n%sIssuer\n",
pad, pad2, pad3, algo ? algo->string : "", pad2);
/* p tracks the number of bytes written so far */
p = (ut32) r;
if (r < 0 || !(tmp = r_x509_name_dump (&crl->issuer, buffer + p, length - p, pad3))) {
free (pad3);
return NULL;
}
p = tmp - buffer;
if (length <= p) {
free (pad3);
return NULL;
}
r = snprintf (buffer + p, length - p, "%sLast Update: %s\n%sNext Update: %s\n%sRevoked Certificates:\n",
pad2, last ? last->string : "Missing",
pad2, next ? next->string : "Missing", pad2);
p += (ut32) r;
if (r < 0) {
free (pad3);
return NULL;
}
/* one entry per revoked certificate */
for (i = 0; i < crl->length; ++i) {
if (length <= p || !(tmp = r_x509_crlentry_dump (crl->revokedCertificates[i], buffer + p, length - p, pad3))) {
free (pad3);
return NULL;
}
p = tmp - buffer;
}
free (pad3);
return buffer + p;
}
/*
 * Render one SignerInfo as human-readable text into buffer (at most
 * `length` bytes).  Returns a pointer just past the written text, or
 * NULL when the buffer is exhausted or a formatting call fails.
 *
 * BUG FIXES:
 *  - the "Encrypted Digest" snprintf never advanced `p`, so the
 *    following "Unauthenticated Attributes:" header overwrote it;
 *  - its byte count reused the stale `o` (still pointing at the serial
 *    number); it now reports si->encryptedDigest's length.
 */
char* r_x509_signedinfo_dump (RPKCS7SignerInfo *si, char* buffer, ut32 length, const char* pad) {
	RASN1String *s = NULL;
	RASN1Object *o = NULL;
	ut32 i, p;
	int r;
	char *tmp, *pad2, *pad3;
	if (!si || !buffer || !length) {
		return NULL;
	}
	if (!pad) {
		pad = "";
	}
	/* pad3 is the deepest indent; pad2 points two chars into it */
	pad3 = r_str_newf ("%s ", pad);
	if (!pad3) {
		return NULL;
	}
	pad2 = pad3 + 2;
	r = snprintf (buffer, length, "%sSignerInfo:\n%sVersion: v%u\n%sIssuer\n", pad, pad2, si->version + 1, pad2);
	p = (ut32) r;
	if (r < 0) {
		free (pad3);
		return NULL;
	}
	if (length <= p || !(tmp = r_x509_name_dump (&si->issuerAndSerialNumber.issuer, buffer + p, length - p, pad3))) {
		free (pad3);
		return NULL;
	}
	p = tmp - buffer;
	if ((o = si->issuerAndSerialNumber.serialNumber)) {
		s = r_asn1_stringify_integer (o->sector, o->length);
	} else {
		s = NULL;
	}
	if (length <= p) {
		free (pad3);
		return NULL;
	}
	r = snprintf (buffer + p, length - p, "%sSerial Number:\n%s%s\n", pad2, pad3, s ? s->string : "Missing");
	p += (ut32) r;
	r_asn1_free_string (s);
	if (r < 0 || length <= p) {
		free (pad3);
		return NULL;
	}
	s = si->digestAlgorithm.algorithm;
	r = snprintf (buffer + p, length - p, "%sDigest Algorithm:\n%s%s\n%sAuthenticated Attributes:\n",
		pad2, pad3, s ? s->string : "Missing", pad2);
	p += (ut32) r;
	if (r < 0 || length <= p) {
		free (pad3);
		return NULL;
	}
	for (i = 0; i < si->authenticatedAttributes.length; ++i) {
		RPKCS7Attribute* attr = si->authenticatedAttributes.elements[i];
		if (!attr) {
			continue;
		}
		r = snprintf (buffer + p, length - p, "%s%s: %u bytes\n",
			pad3, attr->oid ? attr->oid->string : "Missing", attr->data ? attr->data->length : 0);
		p += (ut32) r;
		if (r < 0 || length <= p) {
			free (pad3);
			return NULL;
		}
	}
	s = si->digestEncryptionAlgorithm.algorithm;
	if (length <= p) {
		free (pad3);
		return NULL;
	}
	r = snprintf (buffer + p, length - p, "%sDigest Encryption Algorithm\n%s%s\n",
		pad2, pad3, s ? s->string : "Missing");
	p += (ut32) r;
	if (r < 0 || length <= p) {
		free (pad3);
		return NULL;
	}
	/* FIX: point o at the encrypted digest and advance p so this line is
	   not clobbered by the next snprintf */
	o = si->encryptedDigest;
	r = snprintf (buffer + p, length - p, "%sEncrypted Digest: %u bytes\n", pad2, o ? o->length : 0);
	p += (ut32) r;
	if (r < 0 || length <= p) {
		free (pad3);
		return NULL;
	}
	r = snprintf (buffer + p, length - p, "%sUnauthenticated Attributes:\n", pad2);
	p += (ut32) r;
	if (r < 0 || length <= p) {
		free (pad3);
		return NULL;
	}
	for (i = 0; i < si->unauthenticatedAttributes.length; ++i) {
		RPKCS7Attribute* attr = si->unauthenticatedAttributes.elements[i];
		if (!attr) {
			continue;
		}
		o = attr->data;
		r = snprintf (buffer + p, length - p, "%s%s: %u bytes\n",
			pad3, attr->oid ? attr->oid->string : "Missing", o ? o->length : 0);
		p += (ut32) r;
		if (r < 0 || length <= p) {
			free (pad3);
			return NULL;
		}
	}
	free (pad3);
	return buffer + p;
}
/*
 * Render a parsed CMS (PKCS#7) SignedData container as a human-readable,
 * heap-allocated string.
 *
 * Returns NULL on NULL input, allocation failure, snprintf error, or when
 * the preallocated buffer would overflow; otherwise the caller owns the
 * returned NUL-terminated string and must free() it.
 */
char *r_pkcs7_cms_dump (RCMS* container) {
	RPKCS7SignedData *sd;
	ut32 i, length, p = 0;
	int r;
	char *buffer = NULL, *tmp = NULL;
	if (!container) {
		return NULL;
	}
	sd = &container->signedData;
	/* Generous upper bound: fixed header plus ~1 KiB per certificate. */
	length = 2048 + (sd->certificates.length * 1024);
	if (!length) {
		/* ut32 wrap-around from a huge certificate count. */
		return NULL;
	}
	buffer = (char*) calloc (1, length);
	if (!buffer) {
		return NULL;
	}
	r = snprintf (buffer, length, "signedData\n Version: %u\n Digest Algorithms:\n", sd->version);
	p += (ut32) r;
	if (r < 0 || length <= p) {
		free (buffer);
		return NULL;
	}
	if (sd->digestAlgorithms.elements) {
		for (i = 0; i < sd->digestAlgorithms.length; ++i) {
			if (sd->digestAlgorithms.elements[i]) {
				RASN1String *s = sd->digestAlgorithms.elements[i]->algorithm;
				r = snprintf (buffer + p, length - p, " %s\n", s ? s->string : "Missing");
				p += (ut32) r;
				if (r < 0 || length <= p) {
					free (buffer);
					return NULL;
				}
			}
		}
	}
	r = snprintf (buffer + p, length - p, " Certificates: %u\n", sd->certificates.length);
	p += (ut32) r;
	if (r < 0 || length <= p) {
		free (buffer);
		return NULL;
	}
	for (i = 0; i < sd->certificates.length; ++i) {
		if (length <= p || !(tmp = r_x509_certificate_dump (sd->certificates.elements[i], buffer + p, length - p, " "))) {
			free (buffer);
			return NULL;
		}
		p = tmp - buffer;
	}
	for (i = 0; i < sd->crls.length; ++i) {
		if (length <= p || !(tmp = r_x509_crl_dump (sd->crls.elements[i], buffer + p, length - p, " "))) {
			free (buffer);
			return NULL;
		}
		p = tmp - buffer;
	}
	/*
	 * BUG FIX (CWE-476/UB): the old code repeated "p = tmp - buffer;"
	 * here unconditionally. With zero certificates and zero CRLs the
	 * loops above never run, tmp is still NULL, and (NULL - buffer) is
	 * undefined pointer arithmetic that yields a garbage offset. Both
	 * loops already keep p up to date, so the stray assignment is
	 * simply dropped.
	 */
	if (length <= p) {
		free (buffer);
		return NULL;
	}
	r = snprintf (buffer + p, length - p, " SignerInfos:\n");
	p += (ut32) r;
	if (r < 0 || length <= p) {
		free (buffer);
		return NULL;
	}
	if (sd->signerinfos.elements) {
		for (i = 0; i < sd->signerinfos.length; ++i) {
			if (length <= p || !(tmp = r_x509_signedinfo_dump (sd->signerinfos.elements[i], buffer + p, length - p, " "))) {
				free (buffer);
				return NULL;
			}
			p = tmp - buffer;
		}
	}
	return buffer;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3253_0 |
crossvul-cpp_data_bad_214_1 | // SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
#include "xfs_dir2.h"
#include <linux/iversion.h>
/*
* Check that none of the inode's in the buffer have a next
* unlinked field of 0.
*/
#if defined(DEBUG)
/*
 * Debug-only sanity pass over an inode cluster buffer: every on-disk
 * inode packed into the buffer must carry a non-zero di_next_unlinked
 * field.  Emits an alert per offender; never modifies anything.
 */
void
xfs_inobp_check(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp)
{
	int	ninodes = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
	int	idx;
	for (idx = 0; idx < ninodes; idx++) {
		xfs_dinode_t	*dip;
		dip = xfs_buf_offset(bp, idx * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked) {
			xfs_alert(mp,
	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
				idx, (long long)bp->b_bn);
		}
	}
}
#endif
/*
 * Is @version an acceptable on-disk inode version for this filesystem?
 * CRC-enabled (v5) superblocks require v3 inodes; pre-v5 filesystems
 * accept v1 or v2.  Anything else is rejected.
 */
bool
xfs_dinode_good_version(
	struct xfs_mount *mp,
	__u8		version)
{
	switch (version) {
	case 3:
		return xfs_sb_version_hascrc(&mp->m_sb);
	case 1:
	case 2:
		return !xfs_sb_version_hascrc(&mp->m_sb);
	default:
		return false;
	}
}
/*
* If we are doing readahead on an inode buffer, we might be in log recovery
* reading an inode allocation buffer that hasn't yet been replayed, and hence
* has not had the inode cores stamped into it. Hence for readahead, the buffer
* may be potentially invalid.
*
* If the readahead buffer is invalid, we need to mark it with an error and
* clear the DONE status of the buffer so that a followup read will re-read it
* from disk. We don't report the error otherwise to avoid warnings during log
* recovery and we don't get unnecssary panics on debug kernels. We use EIO here
* because all we want to do is say readahead failed; there is no-one to report
* the error to, so this will distinguish it from a non-ra verifier failure.
* Changes to this readahead error behavour also need to be reflected in
* xfs_dquot_buf_readahead_verify().
*/
/*
 * Walk every inode slot in the buffer and validate its magic, version and
 * unlinked-list pointer.  On failure: readahead buffers are quietly marked
 * -EIO with XBF_DONE cleared (see the long comment above); normal reads
 * report -EFSCORRUPTED via the verifier-error machinery.
 */
static void
xfs_inode_buf_verify(
	struct xfs_buf	*bp,
	bool		readahead)
{
	struct xfs_mount *mp = bp->b_target->bt_mount;
	xfs_agnumber_t	agno;
	int	i;
	int	ni;
	/*
	 * Validate the magic number and version of every inode in the buffer
	 */
	agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
	/* Total number of inode slots packed into this buffer. */
	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
	for (i = 0; i < ni; i++) {
		int	di_ok;
		xfs_dinode_t	*dip;
		xfs_agino_t	unlinked_ino;
		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
		unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
		/*
		 * Slot is good when magic and version match and the unlinked
		 * pointer is either the list terminator (NULLAGINO) or a
		 * valid inode number inside this AG.
		 */
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			xfs_dinode_good_version(mp, dip->di_version) &&
			(unlinked_ino == NULLAGINO ||
			xfs_verify_agino(mp, agno, unlinked_ino));
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
			XFS_ERRTAG_ITOBP_INOTOBP))) {
			if (readahead) {
				/* Silent failure; a later blocking read retries. */
				bp->b_flags &= ~XBF_DONE;
				xfs_buf_ioerror(bp, -EIO);
				return;
			}
#ifdef DEBUG
			xfs_alert(mp,
				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)bp->b_bn, i,
				be16_to_cpu(dip->di_magic));
#endif
			xfs_buf_verifier_error(bp, -EFSCORRUPTED,
					__func__, dip, sizeof(*dip),
					NULL);
			return;
		}
	}
}
/* Normal read path: hard-fail on a bad inode cluster. */
static void
xfs_inode_buf_read_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}
/* Readahead path: soft-fail (see xfs_inode_buf_verify comment). */
static void
xfs_inode_buf_readahead_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, true);
}
/* Write path uses the same strict checks as a normal read. */
static void
xfs_inode_buf_write_verify(
	struct xfs_buf	*bp)
{
	xfs_inode_buf_verify(bp, false);
}
const struct xfs_buf_ops xfs_inode_buf_ops = {
	.name = "xfs_inode",
	.verify_read = xfs_inode_buf_read_verify,
	.verify_write = xfs_inode_buf_write_verify,
};
const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
	/* NOTE(review): "xxfs" looks like a typo for "xfs" — the name is
	 * only used for diagnostics, but worth confirming upstream. */
	.name = "xxfs_inode_ra",
	.verify_read = xfs_inode_buf_readahead_verify,
	.verify_write = xfs_inode_buf_write_verify,
};
/*
* This routine is called to map an inode to the buffer containing the on-disk
* version of the inode. It returns a pointer to the buffer containing the
* on-disk inode in the bpp parameter, and in the dipp parameter it returns a
* pointer to the on-disk inode within that buffer.
*
* If a non-zero error is returned, then the contents of bpp and dipp are
* undefined.
*/
/*
 * Map an inode to the buffer holding its on-disk image.  On success the
 * buffer is returned in *bpp and a pointer to the dinode within it in
 * *dipp; on failure the contents of both are undefined.  Returns 0 or a
 * negative errno.  iget_flags is currently unused here.
 */
int
xfs_imap_to_bp(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_imap	*imap,
	struct xfs_dinode	**dipp,
	struct xfs_buf	**bpp,
	uint	buf_flags,
	uint	iget_flags)
{
	struct xfs_buf	*bp;
	int	error;
	/* Inode buffers are always read unmapped. */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags | XBF_UNMAPPED,
				   &bp, &xfs_inode_buf_ops);
	if (!error) {
		*bpp = bp;
		*dipp = xfs_buf_offset(bp, imap->im_boffset);
		return 0;
	}
	if (error == -EAGAIN) {
		/* Only possible when the caller asked for a trylock. */
		ASSERT(buf_flags & XBF_TRYLOCK);
	} else {
		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
			__func__, error);
	}
	return error;
}
/*
 * Copy an on-disk (big-endian) inode core into the in-core inode: both
 * the xfs_icdinode (ip->i_d) and the VFS inode fields it mirrors.
 * Inverse of xfs_inode_to_disk().
 */
void
xfs_inode_from_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*from)
{
	struct xfs_icdinode	*to = &ip->i_d;
	struct inode	*inode = VFS_I(ip);
	/*
	 * Convert v1 inodes immediately to v2 inode format as this is the
	 * minimum inode version format we support in the rest of the code.
	 */
	to->di_version = from->di_version;
	if (to->di_version == 1) {
		/* v1: 16-bit link count, no project IDs. */
		set_nlink(inode, be16_to_cpu(from->di_onlink));
		to->di_projid_lo = 0;
		to->di_projid_hi = 0;
		to->di_version = 2;
	} else {
		set_nlink(inode, be32_to_cpu(from->di_nlink));
		to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
		to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	}
	to->di_format = from->di_format;
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	/*
	 * Time is signed, so need to convert to signed 32 bit before
	 * storing in inode timestamp which may be 64 bit. Otherwise
	 * a time before epoch is converted to a time long after epoch
	 * on 64 bit systems.
	 */
	inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
	inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
	inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
	inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
	inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
	inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
	inode->i_generation = be32_to_cpu(from->di_gen);
	inode->i_mode = be16_to_cpu(from->di_mode);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	if (to->di_version == 3) {
		/* v3-only metadata: change counter, create time, flags2. */
		inode_set_iversion_queried(inode,
					   be64_to_cpu(from->di_changecount));
		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
		to->di_flags2 = be64_to_cpu(from->di_flags2);
		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
	}
}
/*
 * Flush the in-core inode (ip->i_d plus the mirrored VFS inode fields)
 * out to the on-disk big-endian dinode layout at @to.  @lsn is recorded
 * only for v3 inodes.  Inverse of xfs_inode_from_disk().
 */
void
xfs_inode_to_disk(
	struct xfs_inode	*ip,
	struct xfs_dinode	*to,
	xfs_lsn_t		lsn)
{
	struct xfs_icdinode	*from = &ip->i_d;
	struct inode	*inode = VFS_I(ip);
	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
	/* di_onlink is a v1-only field; always written as zero. */
	to->di_onlink = 0;
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memset(to->di_pad, 0, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
	to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
	to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
	to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
	to->di_nlink = cpu_to_be32(inode->i_nlink);
	to->di_gen = cpu_to_be32(inode->i_generation);
	to->di_mode = cpu_to_be16(inode->i_mode);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	if (from->di_version == 3) {
		/* v3-only fields: change counter, create time, flags2, self-
		 * describing ino/lsn/uuid for CRC verification. */
		to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(ip->i_ino);
		to->di_lsn = cpu_to_be64(lsn);
		memset(to->di_pad2, 0, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
		/* v3 inodes are CRC protected; flushiter is unused. */
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}
/*
 * Convert a logged (host-endian) dinode image back into the on-disk
 * big-endian dinode format.  Used during log recovery when replaying
 * inode log items into inode buffers.
 */
void
xfs_log_dinode_to_disk(
	struct xfs_log_dinode	*from,
	struct xfs_dinode	*to)
{
	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	/* v1-only field, always rewritten as zero. */
	to->di_onlink = 0;
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);
	if (from->di_version == 3) {
		/* v3-only metadata mirrored from the log item. */
		to->di_changecount = cpu_to_be64(from->di_changecount);
		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
		to->di_flags2 = cpu_to_be64(from->di_flags2);
		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
		to->di_ino = cpu_to_be64(from->di_ino);
		to->di_lsn = cpu_to_be64(from->di_lsn);
		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
		uuid_copy(&to->di_uuid, &from->di_uuid);
		/* v3 inodes are CRC protected; flushiter is unused. */
		to->di_flushiter = 0;
	} else {
		to->di_flushiter = cpu_to_be16(from->di_flushiter);
	}
}
/*
 * Structural verifier for an on-disk inode.  Returns NULL when the dinode
 * satisfies every invariant, otherwise the address of the first failing
 * check (__this_address) for corruption reporting.  Which address is
 * returned depends on the order of the checks below — do not reorder
 * casually.
 */
xfs_failaddr_t
xfs_dinode_verify(
	struct xfs_mount	*mp,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	xfs_failaddr_t	fa;
	uint16_t	mode;
	uint16_t	flags;
	uint64_t	flags2;
	uint64_t	di_size;
	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
		return __this_address;
	/* Verify v3 integrity information first */
	if (dip->di_version >= 3) {
		/* v3 inodes only exist on CRC-enabled superblocks and must
		 * carry a valid CRC plus a self-describing ino and uuid. */
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			return __this_address;
		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
				      XFS_DINODE_CRC_OFF))
			return __this_address;
		if (be64_to_cpu(dip->di_ino) != ino)
			return __this_address;
		if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
	}
	/* don't allow invalid i_size */
	di_size = be64_to_cpu(dip->di_size);
	if (di_size & (1ULL << 63))
		return __this_address;
	mode = be16_to_cpu(dip->di_mode);
	/* mode == 0 means unallocated; nonzero modes must map to a file type */
	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
		return __this_address;
	/* No zero-length symlinks/dirs. */
	if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
		return __this_address;
	/* Fork checks carried over from xfs_iformat_fork */
	if (mode &&
	    be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
			be64_to_cpu(dip->di_nblocks))
		return __this_address;
	if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
		return __this_address;
	flags = be16_to_cpu(dip->di_flags);
	/* realtime files require a realtime device to be configured */
	if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
		return __this_address;
	/* Do we have appropriate data fork formats for the mode? */
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		if (dip->di_format != XFS_DINODE_FMT_DEV)
			return __this_address;
		break;
	case S_IFREG:
	case S_IFLNK:
	case S_IFDIR:
		switch (dip->di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (S_ISREG(mode))
				return __this_address;
			if (di_size > XFS_DFORK_DSIZE(dip, mp))
				return __this_address;
			if (dip->di_nextents)
				return __this_address;
			/* fall through */
		case XFS_DINODE_FMT_EXTENTS:
		case XFS_DINODE_FMT_BTREE:
			break;
		default:
			return __this_address;
		}
		break;
	case 0:
		/* Uninitialized inode ok. */
		break;
	default:
		return __this_address;
	}
	if (XFS_DFORK_Q(dip)) {
		/* Attribute fork present: validate its format/extent count. */
		switch (dip->di_aformat) {
		case XFS_DINODE_FMT_LOCAL:
			if (dip->di_anextents)
				return __this_address;
		/* fall through */
		case XFS_DINODE_FMT_EXTENTS:
		case XFS_DINODE_FMT_BTREE:
			break;
		default:
			return __this_address;
		}
	} else {
		/*
		 * If there is no fork offset, this may be a freshly-made inode
		 * in a new disk cluster, in which case di_aformat is zeroed.
		 * Otherwise, such an inode must be in EXTENTS format; this goes
		 * for freed inodes as well.
		 */
		switch (dip->di_aformat) {
		case 0:
		case XFS_DINODE_FMT_EXTENTS:
			break;
		default:
			return __this_address;
		}
		if (dip->di_anextents)
			return __this_address;
	}
	/* extent size hint validation */
	fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
			mode, flags);
	if (fa)
		return fa;
	/* only version 3 or greater inodes are extensively verified here */
	if (dip->di_version < 3)
		return NULL;
	flags2 = be64_to_cpu(dip->di_flags2);
	/* don't allow reflink/cowextsize if we don't have reflink */
	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
	    !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;
	/* only regular files get reflink */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
		return __this_address;
	/* don't let reflink and realtime mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
		return __this_address;
	/* don't let reflink and dax mix */
	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
		return __this_address;
	/* COW extent size hint validation */
	fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
			mode, flags, flags2);
	if (fa)
		return fa;
	return NULL;
}
/*
 * Recompute and store the CRC of an on-disk inode.  Only v3 inodes carry
 * a CRC; older versions are left untouched.
 */
void
xfs_dinode_calc_crc(
	struct xfs_mount	*mp,
	struct xfs_dinode	*dip)
{
	if (dip->di_version >= 3) {
		uint32_t	crc;
		/* CRCs imply a v5 superblock. */
		ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
		crc = xfs_start_cksum_update((char *)dip,
				mp->m_sb.sb_inodesize, XFS_DINODE_CRC_OFF);
		dip->di_crc = xfs_end_cksum(crc);
	}
}
/*
* Read the disk inode attributes into the in-core inode structure.
*
* For version 5 superblocks, if we are initialising a new inode and we are not
* utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simple build the new
* inode core with a random generation number. If we are keeping inodes around,
* we need to read the inode cluster to get the existing generation number off
* disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
* format) then log recovery is dependent on the di_flushiter field being
* initialised from the current on-disk value and hence we must also read the
* inode off disk.
*/
/*
 * Read inode ip->i_ino from disk into the in-core inode ip.
 * Returns 0 on success or a negative errno.  See the long comment above
 * for when the on-disk read can be skipped entirely.
 */
int
xfs_iread(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		iget_flags)
{
	xfs_buf_t	*bp;
	xfs_dinode_t	*dip;
	xfs_failaddr_t	fa;
	int		error;
	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
	if (error)
		return error;
	/* shortcut IO on inode allocation if possible */
	if ((iget_flags & XFS_IGET_CREATE) &&
	    xfs_sb_version_hascrc(&mp->m_sb) &&
	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		/* initialise the on-disk inode core */
		memset(&ip->i_d, 0, sizeof(ip->i_d));
		VFS_I(ip)->i_generation = prandom_u32();
		ip->i_d.di_version = 3;
		return 0;
	}
	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
	if (error)
		return error;
	/* even unallocated inodes are verified */
	fa = xfs_dinode_verify(mp, ip->i_ino, dip);
	if (fa) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip,
				sizeof(*dip), fa);
		error = -EFSCORRUPTED;
		goto out_brelse;
	}
	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat_fork() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
	if (dip->di_mode) {
		xfs_inode_from_disk(ip, dip);
		error = xfs_iformat_fork(ip, dip);
		if (error)  {
#ifdef DEBUG
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
				__func__, error);
#endif /* DEBUG */
			goto out_brelse;
		}
	} else {
		/*
		 * Partial initialisation of the in-core inode. Just the bits
		 * that xfs_ialloc won't overwrite or relies on being correct.
		 */
		ip->i_d.di_version = dip->di_version;
		VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		VFS_I(ip)->i_mode = 0;
	}
	/* v1 inodes were upgraded to v2 by xfs_inode_from_disk(). */
	ASSERT(ip->i_d.di_version >= 2);
	ip->i_delayed_blks = 0;
	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);
	/*
	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
	 * inode, because it was acquired with xfs_trans_read_buf() in
	 * xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be locking the
	 * new in-core inode before putting it in the cache where other
	 * processes can find it.  Thus we don't have to worry about the inode
	 * being changed just because we released the buffer.
	 */
 out_brelse:
	xfs_trans_brelse(tp, bp);
	return error;
}
/*
* Validate di_extsize hint.
*
* The rules are documented at xfs_ioctl_setattr_check_extsize().
* These functions must be kept in sync with each other.
*/
/*
 * Validate the di_extsize hint against mode and flag state.  Returns NULL
 * when consistent, otherwise the address of the first failing check.
 */
xfs_failaddr_t
xfs_inode_validate_extsize(
	struct xfs_mount		*mp,
	uint32_t			extsize,
	uint16_t			mode,
	uint16_t			flags)
{
	bool				rt_flag;
	bool				hint_flag;
	bool				inherit_flag;
	uint32_t			extsize_bytes;
	uint32_t			blocksize_bytes;
	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
	inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
	extsize_bytes = XFS_FSB_TO_B(mp, extsize);
	/* realtime files align to the realtime extent size, not the fsb. */
	if (rt_flag)
		blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
	else
		blocksize_bytes = mp->m_sb.sb_blocksize;
	/* hints only make sense on files and directories */
	if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;
	/* EXTSIZE applies to regular files, EXTSZINHERIT to directories */
	if (hint_flag && !S_ISREG(mode))
		return __this_address;
	if (inherit_flag && !S_ISDIR(mode))
		return __this_address;
	/* a set flag requires a nonzero hint, and vice versa */
	if ((hint_flag || inherit_flag) && extsize == 0)
		return __this_address;
	if (!(hint_flag || inherit_flag) && extsize != 0)
		return __this_address;
	/* hint must be a whole multiple of the allocation unit */
	if (extsize_bytes % blocksize_bytes)
		return __this_address;
	if (extsize > MAXEXTLEN)
		return __this_address;
	/* cap data-device hints to half an allocation group */
	if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;
	return NULL;
}
/*
* Validate di_cowextsize hint.
*
* The rules are documented at xfs_ioctl_setattr_check_cowextsize().
* These functions must be kept in sync with each other.
*/
/*
 * Validate the di_cowextsize hint against mode and flag state.  Returns
 * NULL when consistent, otherwise the address of the first failing check.
 */
xfs_failaddr_t
xfs_inode_validate_cowextsize(
	struct xfs_mount		*mp,
	uint32_t			cowextsize,
	uint16_t			mode,
	uint16_t			flags,
	uint64_t			flags2)
{
	bool				rt_flag;
	bool				hint_flag;
	uint32_t			cowextsize_bytes;
	rt_flag = (flags & XFS_DIFLAG_REALTIME);
	hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
	cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);
	/* the hint is only meaningful with reflink support */
	if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
		return __this_address;
	/* hints only make sense on files and directories */
	if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
		return __this_address;
	/* a set flag requires a nonzero hint, and vice versa */
	if (hint_flag && cowextsize == 0)
		return __this_address;
	if (!hint_flag && cowextsize != 0)
		return __this_address;
	/* CoW hints and realtime files don't mix */
	if (hint_flag && rt_flag)
		return __this_address;
	/* hint must be a whole multiple of the block size */
	if (cowextsize_bytes % mp->m_sb.sb_blocksize)
		return __this_address;
	if (cowextsize > MAXEXTLEN)
		return __this_address;
	/* cap hints to half an allocation group */
	if (cowextsize > mp->m_sb.sb_agblocks / 2)
		return __this_address;
	return NULL;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_214_1 |
crossvul-cpp_data_bad_272_0 | #include <stdio.h>
#include <math.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "gd_intern.h"
/* 2.03: don't include zlib here or we can't build without PNG */
#include "gd.h"
#include "gdhelpers.h"
#include "gd_color.h"
#include "gd_errors.h"
/* 2.0.12: this now checks the clipping rectangle */
#define gdImageBoundsSafeMacro(im, x, y) (!((((y) < (im)->cy1) || ((y) > (im)->cy2)) || (((x) < (im)->cx1) || ((x) > (im)->cx2))))
#ifdef _OSD_POSIX /* BS2000 uses the EBCDIC char set instead of ASCII */
#define CHARSET_EBCDIC
#define __attribute__(any) /*nothing */
#endif
/*_OSD_POSIX*/
#ifndef CHARSET_EBCDIC
#define ASC(ch) ch
#else /*CHARSET_EBCDIC */
#define ASC(ch) gd_toascii[(unsigned char)ch]
static const unsigned char gd_toascii[256] = {
/*00 */ 0x00, 0x01, 0x02, 0x03, 0x85, 0x09, 0x86, 0x7f,
0x87, 0x8d, 0x8e, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /*................ */
/*10 */ 0x10, 0x11, 0x12, 0x13, 0x8f, 0x0a, 0x08, 0x97,
0x18, 0x19, 0x9c, 0x9d, 0x1c, 0x1d, 0x1e, 0x1f, /*................ */
/*20 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x92, 0x17, 0x1b,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x05, 0x06, 0x07, /*................ */
/*30 */ 0x90, 0x91, 0x16, 0x93, 0x94, 0x95, 0x96, 0x04,
0x98, 0x99, 0x9a, 0x9b, 0x14, 0x15, 0x9e, 0x1a, /*................ */
/*40 */ 0x20, 0xa0, 0xe2, 0xe4, 0xe0, 0xe1, 0xe3, 0xe5,
0xe7, 0xf1, 0x60, 0x2e, 0x3c, 0x28, 0x2b, 0x7c, /* .........`.<(+| */
/*50 */ 0x26, 0xe9, 0xea, 0xeb, 0xe8, 0xed, 0xee, 0xef,
0xec, 0xdf, 0x21, 0x24, 0x2a, 0x29, 0x3b, 0x9f, /*&.........!$*);. */
/*60 */ 0x2d, 0x2f, 0xc2, 0xc4, 0xc0, 0xc1, 0xc3, 0xc5,
0xc7, 0xd1, 0x5e, 0x2c, 0x25, 0x5f, 0x3e, 0x3f,
/*-/........^,%_>?*/
/*70 */ 0xf8, 0xc9, 0xca, 0xcb, 0xc8, 0xcd, 0xce, 0xcf,
0xcc, 0xa8, 0x3a, 0x23, 0x40, 0x27, 0x3d, 0x22, /*..........:#@'=" */
/*80 */ 0xd8, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0xab, 0xbb, 0xf0, 0xfd, 0xfe, 0xb1, /*.abcdefghi...... */
/*90 */ 0xb0, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
0x71, 0x72, 0xaa, 0xba, 0xe6, 0xb8, 0xc6, 0xa4, /*.jklmnopqr...... */
/*a0 */ 0xb5, 0xaf, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
0x79, 0x7a, 0xa1, 0xbf, 0xd0, 0xdd, 0xde, 0xae, /*..stuvwxyz...... */
/*b0 */ 0xa2, 0xa3, 0xa5, 0xb7, 0xa9, 0xa7, 0xb6, 0xbc,
0xbd, 0xbe, 0xac, 0x5b, 0x5c, 0x5d, 0xb4, 0xd7, /*...........[\].. */
/*c0 */ 0xf9, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0xad, 0xf4, 0xf6, 0xf2, 0xf3, 0xf5, /*.ABCDEFGHI...... */
/*d0 */ 0xa6, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
0x51, 0x52, 0xb9, 0xfb, 0xfc, 0xdb, 0xfa, 0xff, /*.JKLMNOPQR...... */
/*e0 */ 0xd9, 0xf7, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0xb2, 0xd4, 0xd6, 0xd2, 0xd3, 0xd5, /*..STUVWXYZ...... */
/*f0 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0xb3, 0x7b, 0xdc, 0x7d, 0xda, 0x7e /*0123456789.{.}.~ */
};
#endif /*CHARSET_EBCDIC */
extern const int gdCosT[];
extern const int gdSinT[];
/**
* Group: Error Handling
*/
/*
 * Default error sink: prefix the message with a severity label (none for
 * unknown priorities), format it to stderr, and flush immediately so the
 * output survives a subsequent crash.
 */
void gd_stderr_error(int priority, const char *format, va_list args)
{
	const char *label = NULL;

	switch (priority) {
	case GD_ERROR:
		label = "GD Error: ";
		break;
	case GD_WARNING:
		label = "GD Warning: ";
		break;
	case GD_NOTICE:
		label = "GD Notice: ";
		break;
	case GD_INFO:
		label = "GD Info: ";
		break;
	case GD_DEBUG:
		label = "GD Debug: ";
		break;
	}
	if (label != NULL) {
		fputs(label, stderr);
	}
	vfprintf(stderr, format, args);
	fflush(stderr);
}
/* Currently installed error sink; defaults to stderr logging. */
static gdErrorMethod gd_error_method = gd_stderr_error;
/* Dispatch a formatted message to the installed sink, if any. */
static void _gd_error_ex(int priority, const char *format, va_list args)
{
	if (gd_error_method) {
		gd_error_method(priority, format, args);
	}
}
/* Convenience wrapper: report at GD_WARNING priority. */
void gd_error(const char *format, ...)
{
	va_list args;
	va_start(args, format);
	_gd_error_ex(GD_WARNING, format, args);
	va_end(args);
}
/* Report a message at an explicit priority level. */
void gd_error_ex(int priority, const char *format, ...)
{
	va_list args;
	va_start(args, format);
	_gd_error_ex(priority, format, args);
	va_end(args);
}
/*
	Function: gdSetErrorMethod
	Install a custom error sink.  Passing NULL silences all reporting
	(see _gd_error_ex); use gdClearErrorMethod to restore the default.
*/
BGD_DECLARE(void) gdSetErrorMethod(gdErrorMethod error_method)
{
	gd_error_method = error_method;
}
/*
	Function: gdClearErrorMethod
	Restore the default stderr error sink.
*/
BGD_DECLARE(void) gdClearErrorMethod(void)
{
	gd_error_method = gd_stderr_error;
}
static void gdImageBrushApply (gdImagePtr im, int x, int y);
static void gdImageTileApply (gdImagePtr im, int x, int y);
BGD_DECLARE(int) gdImageGetTrueColorPixel (gdImagePtr im, int x, int y);
/**
* Group: Creation and Destruction
*/
/*
Function: gdImageCreate
gdImageCreate is called to create palette-based images, with no
more than 256 colors. The image must eventually be destroyed using
gdImageDestroy().
Parameters:
sx - The image width.
sy - The image height.
Returns:
A pointer to the new image or NULL if an error occurred.
Example:
(start code)
gdImagePtr im;
im = gdImageCreate(64, 64);
// ... Use the image ...
gdImageDestroy(im);
(end code)
See Also:
<gdImageCreateTrueColor>
*/
/*
 * Allocate a palette-based image of sx * sy pixels.  All allocations are
 * overflow-checked up front; on any failure the partially built image is
 * torn down and NULL is returned.
 */
BGD_DECLARE(gdImagePtr) gdImageCreate (int sx, int sy)
{
	int row;
	gdImagePtr im;

	/* Refuse dimensions whose pixel or row-table allocations overflow. */
	if (overflow2(sx, sy) ||
	    overflow2(sizeof (unsigned char *), sy) ||
	    overflow2(sizeof (unsigned char), sx)) {
		return NULL;
	}

	/* Zeroed header: every field not assigned below starts at 0/NULL
	   (polyInts, brush, tile, style, tpixels, trueColor, AA, ...). */
	im = (gdImage *) gdCalloc(1, sizeof(gdImage));
	if (!im) {
		return NULL;
	}

	/* Row-major ever since gd 1.3 */
	im->pixels = (unsigned char **) gdMalloc (sizeof (unsigned char *) * sy);
	if (!im->pixels) {
		gdFree(im);
		return NULL;
	}
	for (row = 0; row < sy; row++) {
		im->pixels[row] = (unsigned char *) gdCalloc (sx, sizeof (unsigned char));
		if (!im->pixels[row]) {
			/* Unwind the rows allocated so far. */
			while (--row >= 0) {
				gdFree(im->pixels[row]);
			}
			gdFree(im->pixels);
			gdFree(im);
			return NULL;
		}
	}

	im->sx = sx;
	im->sy = sy;
	im->transparent = (-1);
	im->thick = 1;
	for (row = 0; row < gdMaxColors; row++) {
		im->open[row] = 1;
	}
	/* Clipping rectangle covers the whole image. */
	im->cx1 = 0;
	im->cy1 = 0;
	im->cx2 = im->sx - 1;
	im->cy2 = im->sy - 1;
	im->res_x = GD_RESOLUTION;
	im->res_y = GD_RESOLUTION;
	im->interpolation = NULL;
	im->interpolation_id = GD_BILINEAR_FIXED;
	return im;
}
/*
Function: gdImageCreateTrueColor
<gdImageCreateTrueColor> is called to create truecolor images,
with an essentially unlimited number of colors. Invoke
<gdImageCreateTrueColor> with the x and y dimensions of the
desired image. <gdImageCreateTrueColor> returns a <gdImagePtr>
to the new image, or NULL if unable to allocate the image. The
image must eventually be destroyed using <gdImageDestroy>().
Truecolor images are always filled with black at creation
time. There is no concept of a "background" color index.
Parameters:
sx - The image width.
sy - The image height.
Returns:
A pointer to the new image or NULL if an error occurred.
Example:
(start code)
gdImagePtr im;
im = gdImageCreateTrueColor(64, 64);
// ... Use the image ...
gdImageDestroy(im);
(end code)
See Also:
<gdImageCreateTrueColor>
*/
/*
 * Allocate a truecolor image of sx * sy pixels, filled with opaque black.
 * All size computations are overflow-checked first; on any allocation
 * failure the partially built image is torn down and NULL is returned.
 */
BGD_DECLARE(gdImagePtr) gdImageCreateTrueColor (int sx, int sy)
{
	int i;
	gdImagePtr im;

	if (overflow2(sx, sy)) {
		return NULL;
	}
	if (overflow2(sizeof (int *), sy)) {
		return 0;
	}
	if (overflow2(sizeof(int), sx)) {
		return NULL;
	}

	/* Consistency fix: use gdCalloc for a zeroed header exactly like
	   gdImageCreate does, instead of the previous gdMalloc + memset. */
	im = (gdImage *) gdCalloc (1, sizeof (gdImage));
	if (!im) {
		return 0;
	}
	im->tpixels = (int **) gdMalloc (sizeof (int *) * sy);
	if (!im->tpixels) {
		gdFree(im);
		return 0;
	}
	im->polyInts = 0;
	im->polyAllocated = 0;
	im->brush = 0;
	im->tile = 0;
	im->style = 0;
	for (i = 0; (i < sy); i++) {
		im->tpixels[i] = (int *) gdCalloc (sx, sizeof (int));
		if (!im->tpixels[i]) {
			/* 2.0.34: free the rows allocated so far, then bail. */
			while (--i >= 0) {
				gdFree(im->tpixels[i]);
			}
			gdFree(im->tpixels);
			gdFree(im);
			return 0;
		}
	}
	im->sx = sx;
	im->sy = sy;
	im->transparent = (-1);
	im->interlace = 0;
	im->trueColor = 1;
	/* 2.0.2: alpha blending is now on by default, and saving of alpha is
	   off by default. This allows font antialiasing to work as expected
	   on the first try in JPEGs -- quite important -- and also allows
	   for smaller PNGs when saving of alpha channel is not really
	   desired, which it usually isn't! */
	im->saveAlphaFlag = 0;
	im->alphaBlendingFlag = 1;
	im->thick = 1;
	im->AA = 0;
	/* Clipping rectangle covers the whole image. */
	im->cx1 = 0;
	im->cy1 = 0;
	im->cx2 = im->sx - 1;
	im->cy2 = im->sy - 1;
	im->res_x = GD_RESOLUTION;
	im->res_y = GD_RESOLUTION;
	im->interpolation = NULL;
	im->interpolation_id = GD_BILINEAR_FIXED;
	return im;
}
/*
Function: gdImageDestroy
<gdImageDestroy> is used to free the memory associated with an
image. It is important to invoke <gdImageDestroy> before exiting
your program or assigning a new image to a <gdImagePtr> variable.
Parameters:
im - Pointer to the gdImage to delete.
Returns:
Nothing.
Example:
(start code)
gdImagePtr im;
im = gdImageCreate(10, 10);
// ... Use the image ...
// Now destroy it
gdImageDestroy(im);
(end code)
*/
/*
 * Free an image and every buffer it owns: the palette or truecolor pixel
 * rows, the polygon scratch array, and the style array.
 *
 * Robustness fix: tolerate a NULL argument like free(3) does — callers
 * commonly pass the result of a creation function that may have failed,
 * which previously dereferenced NULL here.
 */
BGD_DECLARE(void) gdImageDestroy (gdImagePtr im)
{
	int i;
	if (im == NULL) {
		return;
	}
	if (im->pixels) {
		for (i = 0; (i < im->sy); i++) {
			gdFree (im->pixels[i]);
		}
		gdFree (im->pixels);
	}
	if (im->tpixels) {
		for (i = 0; (i < im->sy); i++) {
			gdFree (im->tpixels[i]);
		}
		gdFree (im->tpixels);
	}
	if (im->polyInts) {
		gdFree (im->polyInts);
	}
	if (im->style) {
		gdFree (im->style);
	}
	gdFree (im);
}
/**
* Group: Color
*/
/**
* Function: gdImageColorClosest
*
* Gets the closest color of the image
*
* This is a simplified variant of <gdImageColorClosestAlpha> where the alpha
* channel is always opaque.
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
*
* Returns:
* The closest color already available in the palette for palette images;
* the color value of the given components for truecolor images.
*
* See also:
* - <gdImageColorExact>
*/
BGD_DECLARE(int) gdImageColorClosest (gdImagePtr im, int r, int g, int b)
{
	/* Thin wrapper: same search with a fully opaque alpha channel. */
	return gdImageColorClosestAlpha (im, r, g, b, gdAlphaOpaque);
}
/**
* Function: gdImageColorClosestAlpha
*
* Gets the closest color of the image
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
* a - The value of the alpha component.
*
* Returns:
* The closest color already available in the palette for palette images;
* the color value of the given components for truecolor images.
*
* See also:
* - <gdImageColorExactAlpha>
*/
BGD_DECLARE(int) gdImageColorClosestAlpha (gdImagePtr im, int r, int g, int b, int a)
{
	int idx;
	int best = -1;		/* palette index of closest match so far */
	int haveBest = 0;	/* becomes 1 once any candidate is seen */
	long bestDist = 0;

	/* Truecolor images can represent any RGBA value directly. */
	if (im->trueColor) {
		return gdTrueColorAlpha (r, g, b, a);
	}
	for (idx = 0; idx < im->colorsTotal; idx++) {
		long dr, dg, db, da, dist;
		if (im->open[idx]) {
			continue;	/* unused palette slot */
		}
		dr = im->red[idx] - r;
		dg = im->green[idx] - g;
		db = im->blue[idx] - b;
		/* gd 2.02: whoops, was - b (thanks to David Marwood) */
		/* gd 2.16: was blue rather than alpha! Geez! Thanks to
		   Artur Jakub Jerzak */
		da = im->alpha[idx] - a;
		/* Squared Euclidean distance in RGBA space. */
		dist = dr * dr + dg * dg + db * db + da * da;
		if (!haveBest || dist < bestDist) {
			bestDist = dist;
			best = idx;
			haveBest = 1;
		}
	}
	return best;	/* -1 when the palette has no usable entries */
}
/* This code is taken from http://www.acm.org/jgt/papers/SmithLyons96/hwb_rgb.html, an article
 * on colour conversion to/from RGB and HWB colour systems.
 * It has been modified to return the converted value as a * parameter.
 */
/* Store the three HWB (hue/whiteness/blackness) channels and return. */
#define RETURN_HWB(h, w, b) {HWB->H = h; HWB->W = w; HWB->B = b; return HWB;}
/* Store the three RGB channels and return. */
#define RETURN_RGB(r, g, b) {RGB->R = r; RGB->G = g; RGB->B = b; return RGB;}
/* Sentinel hue for achromatic colors (see comment below). */
#define HWB_UNDEFINED -1
/* Load 0..255 integer components into a normalized [0,1] RGBType. */
#define SETUP_RGB(s, r, g, b) {s.R = r/255.0; s.G = g/255.0; s.B = b/255.0;}
#define MIN(a,b) ((a)<(b)?(a):(b))
#define MIN3(a,b,c) ((a)<(b)?(MIN(a,c)):(MIN(b,c)))
#define MAX(a,b) ((a)<(b)?(b):(a))
#define MAX3(a,b,c) ((a)<(b)?(MAX(b,c)):(MAX(a,c)))
/*
 * Theoretically, hue 0 (pure red) is identical to hue 6 in these transforms. Pure
 * red always maps to 6 in this implementation. Therefore UNDEFINED can be
 * defined as 0 in situations where only unsigned numbers are desired.
 */
typedef struct {
	float R, G, B;	/* each on [0, 1] (see RGB_to_HWB) */
}
RGBType;
typedef struct {
	float H, W, B;	/* H on [0, 6] or HWB_UNDEFINED; W, B on [0, 1] */
}
HWBType;
static HWBType *
RGB_to_HWB (RGBType RGB, HWBType * HWB)
{
	/*
	 * RGB are each on [0, 1]. W and B are returned on [0, 1] and H is
	 * returned on [0, 6]. Exception: H is returned UNDEFINED if W == 1 - B.
	 */
	float R = RGB.R, G = RGB.G, B = RGB.B, w, v, b, f;
	int i;
	w = MIN3 (R, G, B);	/* whiteness = smallest component */
	v = MAX3 (R, G, B);	/* value = largest component */
	b = 1 - v;		/* blackness */
	/* All components equal: achromatic, hue is undefined. */
	if (v == w)
		RETURN_HWB (HWB_UNDEFINED, w, b);
	/* Pick the hue sextant from which component is smallest. */
	f = (R == w) ? G - B : ((G == w) ? B - R : R - G);
	i = (R == w) ? 3 : ((G == w) ? 5 : 1);
	RETURN_HWB (i - f / (v - w), w, b);
}
/* Perceptual-ish distance between two RGB colors, computed in HWB space.
   Hue distance wraps around the 6-unit colour circle; undefined hues
   (achromatic colors) match any hue. */
static float
HWB_Diff (int r1, int g1, int b1, int r2, int g2, int b2)
{
	RGBType rgbA, rgbB;
	HWBType hwbA, hwbB;
	float d;

	SETUP_RGB (rgbA, r1, g1, b1);
	SETUP_RGB (rgbB, r2, g2, b2);
	RGB_to_HWB (rgbA, &hwbA);
	RGB_to_HWB (rgbB, &hwbB);

	/*
	 * I made this bit up; it seems to produce OK results, and it is certainly
	 * more visually correct than the current RGB metric. (PJW)
	 */
	if ((hwbA.H == HWB_UNDEFINED) || (hwbB.H == HWB_UNDEFINED)) {
		d = 0;		/* Undefined hues always match... */
	} else {
		d = fabs (hwbA.H - hwbB.H);
		if (d > 3) {
			d = 6 - d;	/* Remember, it's a colour circle */
		}
	}
	d = d * d
	    + (hwbA.W - hwbB.W) * (hwbA.W - hwbB.W)
	    + (hwbA.B - hwbB.B) * (hwbA.B - hwbB.B);
	return d;
}
#if 0
/*
 * This is not actually used, but is here for completeness, in case someone wants to
 * use the HWB stuff for anything else...
 */
/* NOTE(review): dead code (compiled out); inverse of RGB_to_HWB above. */
static RGBType *
HWB_to_RGB (HWBType HWB, RGBType * RGB)
{
	/*
	 * H is given on [0, 6] or UNDEFINED. W and B are given on [0, 1].
	 * RGB are each returned on [0, 1].
	 */
	float h = HWB.H, w = HWB.W, b = HWB.B, v, n, f;
	int i;
	v = 1 - b;
	if (h == HWB_UNDEFINED)
		RETURN_RGB (v, v, v);
	i = floor (h);
	f = h - i;
	if (i & 1)
		f = 1 - f;	/* if i is odd */
	n = w + f * (v - w);	/* linear interpolation between w and v */
	switch (i) {
	case 6:
	case 0:
		RETURN_RGB (v, n, w);
	case 1:
		RETURN_RGB (n, v, w);
	case 2:
		RETURN_RGB (w, v, n);
	case 3:
		RETURN_RGB (w, n, v);
	case 4:
		RETURN_RGB (n, w, v);
	case 5:
		RETURN_RGB (v, w, n);
	}
	return RGB;
}
#endif
/*
Function: gdImageColorClosestHWB
Like <gdImageColorClosest>, but ranks candidates with the HWB colour
metric (see HWB_Diff) rather than RGB distance.
*/
BGD_DECLARE(int) gdImageColorClosestHWB (gdImagePtr im, int r, int g, int b)
{
	int idx;
	int best = -1;
	int haveBest = 0;
	float bestDist = 0;

	if (im->trueColor) {
		return gdTrueColor (r, g, b);
	}
	for (idx = 0; idx < im->colorsTotal; idx++) {
		float d;
		if (im->open[idx]) {
			continue;	/* skip unused palette slots */
		}
		d = HWB_Diff (im->red[idx], im->green[idx], im->blue[idx], r, g, b);
		if (!haveBest || d < bestDist) {
			bestDist = d;
			best = idx;
			haveBest = 1;
		}
	}
	return best;
}
/**
* Function: gdImageColorExact
*
* Gets the exact color of the image
*
* This is a simplified variant of <gdImageColorExactAlpha> where the alpha
* channel is always opaque.
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
*
* Returns:
* The exact color already available in the palette for palette images; if
* there is no exact color, -1 is returned.
* For truecolor images the color value of the given components is returned.
*
* See also:
* - <gdImageColorClosest>
*/
BGD_DECLARE(int) gdImageColorExact (gdImagePtr im, int r, int g, int b)
{
	/* Thin wrapper: exact match with a fully opaque alpha channel. */
	return gdImageColorExactAlpha (im, r, g, b, gdAlphaOpaque);
}
/**
* Function: gdImageColorExactAlpha
*
* Gets the exact color of the image
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
* a - The value of the alpha component.
*
* Returns:
* The exact color already available in the palette for palette images; if
* there is no exact color, -1 is returned.
* For truecolor images the color value of the given components is returned.
*
* See also:
* - <gdImageColorClosestAlpha>
* - <gdTrueColorAlpha>
*/
BGD_DECLARE(int) gdImageColorExactAlpha (gdImagePtr im, int r, int g, int b, int a)
{
	int idx;

	/* Truecolor: the requested color always "exists". */
	if (im->trueColor) {
		return gdTrueColorAlpha (r, g, b, a);
	}
	/* Linear scan over the allocated palette entries. */
	for (idx = 0; idx < im->colorsTotal; idx++) {
		if (im->open[idx]) {
			continue;
		}
		if (im->red[idx] == r && im->green[idx] == g &&
		    im->blue[idx] == b && im->alpha[idx] == a) {
			return idx;
		}
	}
	return -1;	/* no exact match in the palette */
}
/**
* Function: gdImageColorAllocate
*
* Allocates a color
*
* This is a simplified variant of <gdImageColorAllocateAlpha> where the alpha
* channel is always opaque.
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
*
* Returns:
* The color value.
*
* See also:
* - <gdImageColorDeallocate>
*/
BGD_DECLARE(int) gdImageColorAllocate (gdImagePtr im, int r, int g, int b)
{
	/* Thin wrapper: allocate with a fully opaque alpha channel. */
	return gdImageColorAllocateAlpha (im, r, g, b, gdAlphaOpaque);
}
/**
* Function: gdImageColorAllocateAlpha
*
* Allocates a color
*
* This is typically used for palette images, but can be used for truecolor
* images as well.
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
 * b - The value of the blue component.
 * a - The value of the alpha component.
 *
 * Returns:
 * The color value, or -1 if the palette is already full.
*
* See also:
* - <gdImageColorDeallocate>
*/
BGD_DECLARE(int) gdImageColorAllocateAlpha (gdImagePtr im, int r, int g, int b, int a)
{
	int idx;
	int slot = -1;

	/* Truecolor images need no palette entry. */
	if (im->trueColor) {
		return gdTrueColorAlpha (r, g, b, a);
	}
	/* Reuse the first previously-deallocated ("open") entry, if any. */
	for (idx = 0; idx < im->colorsTotal; idx++) {
		if (im->open[idx]) {
			slot = idx;
			break;
		}
	}
	/* Otherwise grow the palette -- unless it is already full. */
	if (slot == -1) {
		if (im->colorsTotal == gdMaxColors) {
			return -1;
		}
		slot = im->colorsTotal++;
	}
	im->red[slot] = r;
	im->green[slot] = g;
	im->blue[slot] = b;
	im->alpha[slot] = a;
	im->open[slot] = 0;
	return slot;
}
/*
Function: gdImageColorResolve
gdImageColorResolve is an alternative for the code fragment
(start code)
if ((color=gdImageColorExact(im,R,G,B)) < 0)
if ((color=gdImageColorAllocate(im,R,G,B)) < 0)
color=gdImageColorClosest(im,R,G,B);
(end code)
in a single function. Its advantage is that it is guaranteed to
return a color index in one search over the color table.
*/
BGD_DECLARE(int) gdImageColorResolve (gdImagePtr im, int r, int g, int b)
{
	/* Thin wrapper: resolve with a fully opaque alpha channel. */
	return gdImageColorResolveAlpha (im, r, g, b, gdAlphaOpaque);
}
/*
Function: gdImageColorResolveAlpha
*/
/* Return an exact palette match if present, else allocate the color in a
   free slot, else fall back to the closest allocated color -- all in one
   pass over the palette. */
BGD_DECLARE(int) gdImageColorResolveAlpha (gdImagePtr im, int r, int g, int b, int a)
{
	int c;
	int ct = -1;	/* index of closest non-exact match seen */
	int op = -1;	/* first open (reusable) slot seen */
	long rd, gd, bd, ad, dist;
	long mindist = 4 * 255 * 255;	/* init to max poss dist */
	if (im->trueColor) {
		return gdTrueColorAlpha (r, g, b, a);
	}
	for (c = 0; c < im->colorsTotal; c++) {
		if (im->open[c]) {
			op = c;		/* Save open slot */
			continue;	/* Color not in use */
		}
		if (c == im->transparent) {
			/* don't ever resolve to the color that has
			 * been designated as the transparent color */
			continue;
		}
		/* Squared Euclidean distance in RGBA space. */
		rd = (long) (im->red[c] - r);
		gd = (long) (im->green[c] - g);
		bd = (long) (im->blue[c] - b);
		ad = (long) (im->alpha[c] - a);
		dist = rd * rd + gd * gd + bd * bd + ad * ad;
		if (dist < mindist) {
			if (dist == 0) {
				return c;	/* Return exact match color */
			}
			mindist = dist;
			ct = c;
		}
	}
	/* no exact match. We now know closest, but first try to allocate exact */
	if (op == -1) {
		op = im->colorsTotal;
		if (op == gdMaxColors) {
			/* No room for more colors */
			return ct;	/* Return closest available color */
		}
		im->colorsTotal++;
	}
	im->red[op] = r;
	im->green[op] = g;
	im->blue[op] = b;
	im->alpha[op] = a;
	im->open[op] = 0;
	return op;	/* Return newly allocated color */
}
/**
* Function: gdImageColorDeallocate
*
* Removes a palette entry
*
* This is a no-op for truecolor images.
*
* Parameters:
* im - The image.
* color - The palette index.
*
* See also:
* - <gdImageColorAllocate>
* - <gdImageColorAllocateAlpha>
*/
BGD_DECLARE(void) gdImageColorDeallocate (gdImagePtr im, int color)
{
	/* Truecolor images have no palette; also ignore out-of-range indices. */
	if (im->trueColor || (color >= gdMaxColors) || (color < 0)) {
		return;
	}
	/* Mark it open. The slot can then be reused by the allocators. */
	im->open[color] = 1;
}
/**
* Function: gdImageColorTransparent
*
* Sets the transparent color of the image
*
* Parameter:
* im - The image.
* color - The color.
*
* See also:
* - <gdImageGetTransparent>
*/
BGD_DECLARE(void) gdImageColorTransparent (gdImagePtr im, int color)
{
	if (color < 0) {
		return;	/* negative indices are invalid; leave state unchanged */
	}
	if (!im->trueColor) {
		if (color >= gdMaxColors) {
			return;	/* out of palette range */
		}
		/* Restore the previous transparent entry to opaque first. */
		if (im->transparent != -1) {
			im->alpha[im->transparent] = gdAlphaOpaque;
		}
		im->alpha[color] = gdAlphaTransparent;
	}
	im->transparent = color;
}
/*
Function: gdImagePaletteCopy
*/
/* Replace 'to's palette with 'from's, remapping every pixel of 'to' to the
   closest color in 'from's palette. Both images must be palette-based. */
BGD_DECLARE(void) gdImagePaletteCopy (gdImagePtr to, gdImagePtr from)
{
	int i;
	int x, y, p;
	int xlate[256];	/* memoized old-index -> new-index mapping */
	if (to->trueColor) {
		return;
	}
	if (from->trueColor) {
		return;
	}
	for (i = 0; i < 256; i++) {
		xlate[i] = -1;	/* -1 = not yet mapped */
	};
	for (y = 0; y < (to->sy); y++) {
		for (x = 0; x < (to->sx); x++) {
			/* Optimization: no gdImageGetPixel */
			p = to->pixels[y][x];
			if (xlate[p] == -1) {
				/* This ought to use HWB, but we don't have an alpha-aware
				   version of that yet. */
				xlate[p] =
				    gdImageColorClosestAlpha (from, to->red[p], to->green[p],
				                              to->blue[p], to->alpha[p]);
				/*printf("Mapping %d (%d, %d, %d, %d) to %d (%d, %d, %d, %d)\n", */
				/* p, to->red[p], to->green[p], to->blue[p], to->alpha[p], */
				/* xlate[p], from->red[xlate[p]], from->green[xlate[p]], from->blue[xlate[p]], from->alpha[xlate[p]]); */
			};
			/* Optimization: no gdImageSetPixel */
			to->pixels[y][x] = xlate[p];
		};
	};
	/* Copy the source palette entries over, closing them for use... */
	for (i = 0; (i < (from->colorsTotal)); i++) {
		/*printf("Copying color %d (%d, %d, %d, %d)\n", i, from->red[i], from->blue[i], from->green[i], from->alpha[i]); */
		to->red[i] = from->red[i];
		to->blue[i] = from->blue[i];
		to->green[i] = from->green[i];
		to->alpha[i] = from->alpha[i];
		to->open[i] = 0;
	};
	/* ...and mark any surplus entries of the old palette as free. */
	for (i = from->colorsTotal; (i < to->colorsTotal); i++) {
		to->open[i] = 1;
	};
	to->colorsTotal = from->colorsTotal;
}
/*
Function: gdImageColorReplace
*/
/* Replace every pixel of color 'src' (within the clipping rectangle) with
   'dst'. Returns the number of pixels changed. */
BGD_DECLARE(int) gdImageColorReplace (gdImagePtr im, int src, int dst)
{
	register int x, y;
	int n = 0;
	if (src == dst) {
		return 0;	/* nothing to do */
	}
	/* Shared scan loop, parameterized on the raw pixel accessor. */
#define REPLACING_LOOP(pixel) do {						\
		for (y = im->cy1; y <= im->cy2; y++) {				\
			for (x = im->cx1; x <= im->cx2; x++) {			\
				if (pixel(im, x, y) == src) {			\
					gdImageSetPixel(im, x, y, dst);		\
					n++;					\
				}						\
			}							\
		}								\
	} while (0)
	if (im->trueColor) {
		REPLACING_LOOP(gdImageTrueColorPixel);
	} else {
		REPLACING_LOOP(gdImagePalettePixel);
	}
#undef REPLACING_LOOP
	return n;
}
/*
Function: gdImageColorReplaceThreshold
*/
/* Like gdImageColorReplace, but replaces any pixel whose color is within
   'threshold' of 'src' (per gdColorMatch). Returns pixels changed. */
BGD_DECLARE(int) gdImageColorReplaceThreshold (gdImagePtr im, int src, int dst, float threshold)
{
	register int x, y;
	int n = 0;
	if (src == dst) {
		return 0;	/* nothing to do */
	}
#define REPLACING_LOOP(pixel) do {							\
		for (y = im->cy1; y <= im->cy2; y++) {					\
			for (x = im->cx1; x <= im->cx2; x++) {				\
				if (gdColorMatch(im, src, pixel(im, x, y), threshold)) {\
					gdImageSetPixel(im, x, y, dst);			\
					n++;						\
				}							\
			}								\
		}									\
	} while (0)
	if (im->trueColor) {
		REPLACING_LOOP(gdImageTrueColorPixel);
	} else {
		REPLACING_LOOP(gdImagePalettePixel);
	}
#undef REPLACING_LOOP
	return n;
}
/* qsort()/bsearch() comparator: orders elements by their leading int. */
static int colorCmp (const void *x, const void *y)
{
	int a = *(int const *) x;
	int b = *(int const *) y;
	if (a < b) {
		return -1;
	}
	if (a > b) {
		return 1;
	}
	return 0;
}
/*
Function: gdImageColorReplaceArray
*/
/* Replace each color src[i] with dst[i] across the clipping rectangle.
   Builds a (src, dst) pair table sorted by src so each pixel's lookup is a
   bsearch. Returns pixels changed, or -1 on allocation/overflow failure. */
BGD_DECLARE(int) gdImageColorReplaceArray (gdImagePtr im, int len, int *src, int *dst)
{
	register int x, y;
	int c, *d, *base;
	int i, n = 0;
	if (len <= 0 || src == dst) {
		return 0;
	}
	if (len == 1) {
		/* Single pair: the scalar routine is cheaper. */
		return gdImageColorReplace(im, src[0], dst[0]);
	}
	/* Guard the len * (2 * sizeof(int)) multiplication below. */
	if (overflow2(len, sizeof(int)<<1)) {
		return -1;
	}
	base = (int *)gdMalloc(len * (sizeof(int)<<1));
	if (!base) {
		return -1;
	}
	/* Interleave pairs: base[2i] = src, base[2i+1] = dst. */
	for (i = 0; i < len; i++) {
		base[(i<<1)] = src[i];
		base[(i<<1)+1] = dst[i];
	}
	/* colorCmp compares only the leading (src) int of each pair. */
	qsort(base, len, sizeof(int)<<1, colorCmp);
#define REPLACING_LOOP(pixel) do {								\
		for (y = im->cy1; y <= im->cy2; y++) {						\
			for (x = im->cx1; x <= im->cx2; x++) {					\
				c = pixel(im, x, y);						\
				if ( (d = (int *)bsearch(&c, base, len, sizeof(int)<<1, colorCmp)) ) { \
					gdImageSetPixel(im, x, y, d[1]);			\
					n++;						\
				}							\
			}								\
		}									\
	} while (0)
	if (im->trueColor) {
		REPLACING_LOOP(gdImageTrueColorPixel);
	} else {
		REPLACING_LOOP(gdImagePalettePixel);
	}
#undef REPLACING_LOOP
	gdFree(base);
	return n;
}
/*
Function: gdImageColorReplaceCallback
*/
/* Replace colors via a user callback: each pixel color c becomes
   callback(im, c). For palette images the callback is invoked once per
   allocated palette entry instead of once per pixel. Returns pixels
   changed, or -1 on allocation failure. */
BGD_DECLARE(int) gdImageColorReplaceCallback (gdImagePtr im, gdCallbackImageColor callback)
{
	int c, d, n = 0;
	if (!callback) {
		return 0;
	}
	if (im->trueColor) {
		register int x, y;
		for (y = im->cy1; y <= im->cy2; y++) {
			for (x = im->cx1; x <= im->cx2; x++) {
				c = gdImageTrueColorPixel(im, x, y);
				if ( (d = callback(im, c)) != c) {
					gdImageSetPixel(im, x, y, d);
					n++;
				}
			}
		}
	} else { /* palette */
		int *sarr, *darr;
		int k, len = 0;
		sarr = (int *)gdCalloc(im->colorsTotal, sizeof(int));
		if (!sarr) {
			return -1;
		}
		/* Collect the in-use palette indices. */
		for (c = 0; c < im->colorsTotal; c++) {
			if (!im->open[c]) {
				sarr[len++] = c;
			}
		}
		darr = (int *)gdCalloc(len, sizeof(int));
		if (!darr) {
			gdFree(sarr);
			return -1;
		}
		/* Map each in-use index through the callback... */
		for (k = 0; k < len; k++) {
			darr[k] = callback(im, sarr[k]);
		}
		/* ...then apply the whole mapping in one pass (k == len here). */
		n = gdImageColorReplaceArray(im, k, sarr, darr);
		gdFree(darr);
		gdFree(sarr);
	}
	return n;
}
/* 2.0.10: before the drawing routines, some code to clip points that are
* outside the drawing window. Nick Atty (nick@canalplan.org.uk)
*
* This is the Sutherland Hodgman Algorithm, as implemented by
* Duvanenko, Robbins and Gyurcsik - SH(DRG) for short. See Dr Dobb's
* Journal, January 1996, pp107-110 and 116-117
*
* Given the end points of a line, and a bounding rectangle (which we
* know to be from (0,0) to (SX,SY)), adjust the endpoints to be on
* the edges of the rectangle if the line should be drawn at all,
* otherwise return a failure code */
/* this does "one-dimensional" clipping: note that the second time it
is called, all the x parameters refer to height and the y to width
- the comments ignore this (if you can understand it when it's
looking at the X parameters, it should become clear what happens on
the second call!) The code is simplified from that in the article,
as we know that gd images always start at (0,0) */
/* 2.0.26, TBB: we now have to respect a clipping rectangle, it won't
necessarily start at 0. */
/* One-dimensional Sutherland-Hodgman clip of the segment (*x0,*y0)-(*x1,*y1)
   against [mindim, maxdim] along the x axis; called a second time with x/y
   swapped to clip along y. Returns 0 if the segment lies entirely outside,
   1 otherwise (endpoints adjusted in place). */
static int
clip_1d (int *x0, int *y0, int *x1, int *y1, int mindim, int maxdim)
{
	double m;	/* gradient of line */
	if (*x0 < mindim) {
		/* start of line is left of window */
		if (*x1 < mindim)	/* as is the end, so the line never cuts the window */
			return 0;
		m = (*y1 - *y0) / (double) (*x1 - *x0);	/* calculate the slope of the line */
		/* adjust x0 to be on the left boundary (ie to be zero), and y0 to match */
		*y0 -= (int)(m * (*x0 - mindim));
		*x0 = mindim;
		/* now, perhaps, adjust the far end of the line as well */
		if (*x1 > maxdim) {
			/* NOTE(review): no explicit (int) cast here, unlike the
			   sibling branches; the implicit conversion truncates the
			   same way, so behavior matches. */
			*y1 += m * (maxdim - *x1);
			*x1 = maxdim;
		}
		return 1;
	}
	if (*x0 > maxdim) {
		/* start of line is right of window -
		   complement of above */
		if (*x1 > maxdim)	/* as is the end, so the line misses the window */
			return 0;
		m = (*y1 - *y0) / (double) (*x1 - *x0);	/* calculate the slope of the line */
		*y0 += (int)(m * (maxdim - *x0));	/* adjust so point is on the right
							   boundary */
		*x0 = maxdim;
		/* now, perhaps, adjust the end of the line */
		if (*x1 < mindim) {
			*y1 -= (int)(m * (*x1 - mindim));
			*x1 = mindim;
		}
		return 1;
	}
	/* the final case - the start of the line is inside the window */
	if (*x1 > maxdim) {
		/* other end is outside to the right */
		m = (*y1 - *y0) / (double) (*x1 - *x0);	/* calculate the slope of the line */
		*y1 += (int)(m * (maxdim - *x1));
		*x1 = maxdim;
		return 1;
	}
	if (*x1 < mindim) {
		/* other end is outside to the left */
		m = (*y1 - *y0) / (double) (*x1 - *x0);	/* calculate the slope of the line */
		*y1 -= (int)(m * (*x1 - mindim));
		*x1 = mindim;
		return 1;
	}
	/* only get here if both points are inside the window */
	return 1;
}
/* end of line clipping code */
/**
* Group: Pixels
*/
/*
Function: gdImageSetPixel
*/
/* Set one pixel. 'color' may be a real color or one of the special values
   gdStyled/gdStyledBrushed/gdBrushed/gdTiled/gdAntiAliased, which dispatch
   to styled/brush/tile handling (possibly recursing with a concrete color). */
BGD_DECLARE(void) gdImageSetPixel (gdImagePtr im, int x, int y, int color)
{
	int p;
	switch (color) {
	case gdStyled:
		if (!im->style) {
			/* Refuse to draw if no style is set. */
			return;
		} else {
			p = im->style[im->stylePos++];
		}
		if (p != (gdTransparent)) {
			gdImageSetPixel (im, x, y, p);	/* recurse with the style's color */
		}
		im->stylePos = im->stylePos % im->styleLength;	/* wrap style cursor */
		break;
	case gdStyledBrushed:
		if (!im->style) {
			/* Refuse to draw if no style is set. */
			return;
		}
		p = im->style[im->stylePos++];
		if ((p != gdTransparent) && (p != 0)) {
			gdImageSetPixel (im, x, y, gdBrushed);
		}
		im->stylePos = im->stylePos % im->styleLength;
		break;
	case gdBrushed:
		gdImageBrushApply (im, x, y);
		break;
	case gdTiled:
		gdImageTileApply (im, x, y);
		break;
	case gdAntiAliased:
		/* This shouldn't happen (2.0.26) because we just call
		   gdImageAALine now, but do something sane. */
		gdImageSetPixel(im, x, y, im->AA_color);
		break;
	default:
		if (gdImageBoundsSafeMacro (im, x, y)) {
			if (im->trueColor) {
				/* Blending mode decides how the new color combines
				   with the existing pixel. */
				switch (im->alphaBlendingFlag) {
				default:
				case gdEffectReplace:
					im->tpixels[y][x] = color;
					break;
				case gdEffectAlphaBlend:
				case gdEffectNormal:
					im->tpixels[y][x] = gdAlphaBlend(im->tpixels[y][x], color);
					break;
				case gdEffectOverlay :
					im->tpixels[y][x] = gdLayerOverlay(im->tpixels[y][x], color);
					break;
				case gdEffectMultiply :
					im->tpixels[y][x] = gdLayerMultiply(im->tpixels[y][x], color);
					break;
				}
			} else {
				im->pixels[y][x] = color;
			}
		}
		break;
	}
}
/* Stamp the current brush image centred on (x, y), honoring the brush's
   transparent color. Four cases by truecolor-ness of destination/brush. */
static void
gdImageBrushApply (gdImagePtr im, int x, int y)
{
	int lx, ly;
	int hy;
	int hx;
	int x1, y1, x2, y2;	/* destination rectangle covered by the brush */
	int srcx, srcy;		/* current position within the brush */
	if (!im->brush) {
		return;
	}
	hy = gdImageSY (im->brush) / 2;
	y1 = y - hy;
	y2 = y1 + gdImageSY (im->brush);
	hx = gdImageSX (im->brush) / 2;
	x1 = x - hx;
	x2 = x1 + gdImageSX (im->brush);
	srcy = 0;
	if (im->trueColor) {
		if (im->brush->trueColor) {
			for (ly = y1; (ly < y2); ly++) {
				srcx = 0;
				for (lx = x1; (lx < x2); lx++) {
					int p;
					p = gdImageGetTrueColorPixel (im->brush, srcx, srcy);
					/* 2.0.9, Thomas Winzig: apply simple full transparency */
					if (p != gdImageGetTransparent (im->brush)) {
						gdImageSetPixel (im, lx, ly, p);
					}
					srcx++;
				}
				srcy++;
			}
		} else {
			/* 2.0.12: Brush palette, image truecolor (thanks to Thorben Kundinger
			   for pointing out the issue) */
			for (ly = y1; (ly < y2); ly++) {
				srcx = 0;
				for (lx = x1; (lx < x2); lx++) {
					int p, tc;
					/* p: palette index (for transparency test);
					   tc: same pixel promoted to truecolor. */
					p = gdImageGetPixel (im->brush, srcx, srcy);
					tc = gdImageGetTrueColorPixel (im->brush, srcx, srcy);
					/* 2.0.9, Thomas Winzig: apply simple full transparency */
					if (p != gdImageGetTransparent (im->brush)) {
						gdImageSetPixel (im, lx, ly, tc);
					}
					srcx++;
				}
				srcy++;
			}
		}
	} else {
		for (ly = y1; (ly < y2); ly++) {
			srcx = 0;
			for (lx = x1; (lx < x2); lx++) {
				int p;
				p = gdImageGetPixel (im->brush, srcx, srcy);
				/* Allow for non-square brushes! */
				if (p != gdImageGetTransparent (im->brush)) {
					/* Truecolor brush. Very slow
					   on a palette destination. */
					if (im->brush->trueColor) {
						gdImageSetPixel (im, lx, ly,
						                 gdImageColorResolveAlpha (im,
						                                           gdTrueColorGetRed
						                                           (p),
						                                           gdTrueColorGetGreen
						                                           (p),
						                                           gdTrueColorGetBlue
						                                           (p),
						                                           gdTrueColorGetAlpha
						                                           (p)));
					} else {
						/* Palette-to-palette: use the precomputed map. */
						gdImageSetPixel (im, lx, ly, im->brushColorMap[p]);
					}
				}
				srcx++;
			}
			srcy++;
		}
	}
}
/* Paint one pixel from the current tile image, wrapping the tile across
   the destination with modulo arithmetic; honors the tile's transparent
   color. */
static void
gdImageTileApply (gdImagePtr im, int x, int y)
{
	gdImagePtr tile = im->tile;
	int srcx, srcy;
	int p;
	if (!tile) {
		return;
	}
	/* Position within the repeating tile. */
	srcx = x % gdImageSX (tile);
	srcy = y % gdImageSY (tile);
	if (im->trueColor) {
		p = gdImageGetPixel (tile, srcx, srcy);
		if (p != gdImageGetTransparent (tile)) {
			if (!tile->trueColor) {
				/* Promote the palette entry to a packed truecolor value. */
				p = gdTrueColorAlpha(tile->red[p], tile->green[p], tile->blue[p], tile->alpha[p]);
			}
			gdImageSetPixel (im, x, y, p);
		}
	} else {
		p = gdImageGetPixel (tile, srcx, srcy);
		/* Allow for transparency */
		if (p != gdImageGetTransparent (tile)) {
			if (tile->trueColor) {
				/* Truecolor tile. Very slow
				   on a palette destination. */
				gdImageSetPixel (im, x, y,
				                 gdImageColorResolveAlpha (im,
				                                           gdTrueColorGetRed
				                                           (p),
				                                           gdTrueColorGetGreen
				                                           (p),
				                                           gdTrueColorGetBlue
				                                           (p),
				                                           gdTrueColorGetAlpha
				                                           (p)));
			} else {
				/* Palette-to-palette: use the precomputed map. */
				gdImageSetPixel (im, x, y, im->tileColorMap[p]);
			}
		}
	}
}
/**
* Function: gdImageGetPixel
*
* Gets a pixel color as stored in the image.
*
* Parameters:
* im - The image.
* x - The x-coordinate.
* y - The y-coordinate.
*
* See also:
* - <gdImageGetTrueColorPixel>
* - <gdImagePalettePixel>
* - <gdImageTrueColorPixel>
*/
BGD_DECLARE(int) gdImageGetPixel (gdImagePtr im, int x, int y)
{
	/* Out-of-bounds reads yield 0 rather than faulting. */
	if (!gdImageBoundsSafeMacro (im, x, y)) {
		return 0;
	}
	/* Truecolor images store packed color values, palette images indices. */
	return im->trueColor ? im->tpixels[y][x] : im->pixels[y][x];
}
/**
* Function: gdImageGetTrueColorPixel
*
* Gets a pixel color always as truecolor value.
*
* Parameters:
* im - The image.
* x - The x-coordinate.
* y - The y-coordinate.
*
* See also:
* - <gdImageGetPixel>
* - <gdImageTrueColorPixel>
*/
BGD_DECLARE(int) gdImageGetTrueColorPixel (gdImagePtr im, int x, int y)
{
	int c = gdImageGetPixel (im, x, y);
	if (im->trueColor) {
		return c;	/* already a packed truecolor value */
	}
	/* Promote the palette index to a packed truecolor value; the image's
	   designated transparent index maps to fully transparent alpha. */
	return gdTrueColorAlpha (im->red[c], im->green[c], im->blue[c],
	                         (im->transparent == c) ? gdAlphaTransparent :
	                         im->alpha[c]);
}
/**
* Group: Primitives
*/
/*
Function: gdImageAABlend
NO-OP, kept for library compatibility.
*/
BGD_DECLARE(void) gdImageAABlend (gdImagePtr im)
{
	/* Intentionally does nothing; kept only so old callers still link. */
	(void)im;
}
static void gdImageAALine (gdImagePtr im, int x1, int y1, int x2, int y2, int col);
static void _gdImageFilledHRectangle (gdImagePtr im, int x1, int y1, int x2, int y2,
int color);
/* Horizontal line at row y from x1 to x2 (either order). Thick lines are
   drawn as a filled rectangle centred on y. */
static void gdImageHLine(gdImagePtr im, int y, int x1, int x2, int col)
{
	if (im->thick > 1) {
		int half = im->thick >> 1;
		_gdImageFilledHRectangle(im, x1, y - half, x2, y + im->thick - half - 1, col);
		return;
	}
	if (x2 < x1) {
		int tmp = x1;
		x1 = x2;
		x2 = tmp;
	}
	while (x1 <= x2) {
		gdImageSetPixel(im, x1, y, col);
		x1++;
	}
}
/* Vertical line at column x from y1 to y2 (either order). Thick lines are
   drawn as a filled rectangle centred on x. */
static void gdImageVLine(gdImagePtr im, int x, int y1, int y2, int col)
{
	if (im->thick > 1) {
		int half = im->thick >> 1;
		gdImageFilledRectangle(im, x - half, y1, x + im->thick - half - 1, y2, col);
		return;
	}
	if (y2 < y1) {
		int tmp = y2;
		y2 = y1;
		y1 = tmp;
	}
	while (y1 <= y2) {
		gdImageSetPixel(im, x, y1, col);
		y1++;
	}
}
/*
Function: gdImageLine
Bresenham as presented in Foley & Van Dam.
*/
/* Bresenham line from (x1,y1) to (x2,y2), honoring im->thick by painting a
   perpendicular stroke of width 'wid' at each step. */
BGD_DECLARE(void) gdImageLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
	int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag;
	int wid;
	int w, wstart;
	int thick;
	if (color == gdAntiAliased) {
		/*
		   gdAntiAliased passed as color: use the much faster, much cheaper
		   and equally attractive gdImageAALine implementation. That
		   clips too, so don't clip twice.
		 */
		gdImageAALine(im, x1, y1, x2, y2, im->AA_color);
		return;
	}
	/* 2.0.10: Nick Atty: clip to edges of drawing rectangle, return if no
	   points need to be drawn. 2.0.26, TBB: clip to edges of clipping
	   rectangle. We were getting away with this because gdImageSetPixel
	   is used for actual drawing, but this is still more efficient and opens
	   the way to skip per-pixel bounds checking in the future. */
	if (clip_1d (&x1, &y1, &x2, &y2, im->cx1, im->cx2) == 0)
		return;
	if (clip_1d (&y1, &x1, &y2, &x2, im->cy1, im->cy2) == 0)
		return;
	thick = im->thick;
	dx = abs (x2 - x1);
	dy = abs (y2 - y1);
	/* Axis-aligned lines have dedicated fast paths. */
	if (dx == 0) {
		gdImageVLine(im, x1, y1, y2, color);
		return;
	} else if (dy == 0) {
		gdImageHLine(im, y1, x1, x2, color);
		return;
	}
	if (dy <= dx) {
		/* More-or-less horizontal. use wid for vertical stroke */
		/* Doug Claar: watch out for NaN in atan2 (2.0.5) */
		/* 2.0.12: Michael Schwartz: divide rather than multiply;
		   TBB: but watch out for /0! */
		double ac = cos (atan2 (dy, dx));
		if (ac != 0) {
			wid = thick / ac;
		} else {
			wid = 1;
		}
		if (wid == 0) {
			wid = 1;
		}
		d = 2 * dy - dx;
		incr1 = 2 * dy;
		incr2 = 2 * (dy - dx);
		/* Always step x in the positive direction; remember which way
		   y must move. */
		if (x1 > x2) {
			x = x2;
			y = y2;
			ydirflag = (-1);
			xend = x1;
		} else {
			x = x1;
			y = y1;
			ydirflag = 1;
			xend = x2;
		}
		/* Set up line thickness */
		wstart = y - wid / 2;
		for (w = wstart; w < wstart + wid; w++)
			gdImageSetPixel (im, x, w, color);
		if (((y2 - y1) * ydirflag) > 0) {
			while (x < xend) {
				x++;
				if (d < 0) {
					d += incr1;
				} else {
					y++;
					d += incr2;
				}
				wstart = y - wid / 2;
				for (w = wstart; w < wstart + wid; w++)
					gdImageSetPixel (im, x, w, color);
			}
		} else {
			while (x < xend) {
				x++;
				if (d < 0) {
					d += incr1;
				} else {
					y--;
					d += incr2;
				}
				wstart = y - wid / 2;
				for (w = wstart; w < wstart + wid; w++)
					gdImageSetPixel (im, x, w, color);
			}
		}
	} else {
		/* More-or-less vertical. use wid for horizontal stroke */
		/* 2.0.12: Michael Schwartz: divide rather than multiply;
		   TBB: but watch out for /0! */
		double as = sin (atan2 (dy, dx));
		if (as != 0) {
			wid = thick / as;
		} else {
			wid = 1;
		}
		if (wid == 0)
			wid = 1;
		d = 2 * dx - dy;
		incr1 = 2 * dx;
		incr2 = 2 * (dx - dy);
		/* Always step y in the positive direction; remember which way
		   x must move. */
		if (y1 > y2) {
			y = y2;
			x = x2;
			yend = y1;
			xdirflag = (-1);
		} else {
			y = y1;
			x = x1;
			yend = y2;
			xdirflag = 1;
		}
		/* Set up line thickness */
		wstart = x - wid / 2;
		for (w = wstart; w < wstart + wid; w++)
			gdImageSetPixel (im, w, y, color);
		if (((x2 - x1) * xdirflag) > 0) {
			while (y < yend) {
				y++;
				if (d < 0) {
					d += incr1;
				} else {
					x++;
					d += incr2;
				}
				wstart = x - wid / 2;
				for (w = wstart; w < wstart + wid; w++)
					gdImageSetPixel (im, w, y, color);
			}
		} else {
			while (y < yend) {
				y++;
				if (d < 0) {
					d += incr1;
				} else {
					x--;
					d += incr2;
				}
				wstart = x - wid / 2;
				for (w = wstart; w < wstart + wid; w++)
					gdImageSetPixel (im, w, y, color);
			}
		}
	}
}
static void dashedSet (gdImagePtr im, int x, int y, int color,
int *onP, int *dashStepP, int wid, int vert);
/*
Function: gdImageDashedLine
*/
/* Bresenham line drawn with a dash pattern: dashedSet toggles the pen on/off
   every gdDashSize pixels along the line. Note: unlike gdImageLine, no
   clipping is performed here; gdImageSetPixel bounds-checks per pixel. */
BGD_DECLARE(void) gdImageDashedLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
	int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag;
	int dashStep = 0;	/* position within the current dash segment */
	int on = 1;		/* pen state, toggled by dashedSet */
	int wid;
	int vert;
	int thick = im->thick;
	dx = abs (x2 - x1);
	dy = abs (y2 - y1);
	if (dy <= dx) {
		/* More-or-less horizontal. use wid for vertical stroke */
		/* 2.0.12: Michael Schwartz: divide rather than multiply;
		   TBB: but watch out for /0! */
		double as = sin (atan2 (dy, dx));
		if (as != 0) {
			wid = thick / as;
		} else {
			wid = 1;
		}
		vert = 1;	/* stroke runs vertically at each step */
		d = 2 * dy - dx;
		incr1 = 2 * dy;
		incr2 = 2 * (dy - dx);
		if (x1 > x2) {
			x = x2;
			y = y2;
			ydirflag = (-1);
			xend = x1;
		} else {
			x = x1;
			y = y1;
			ydirflag = 1;
			xend = x2;
		}
		dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
		if (((y2 - y1) * ydirflag) > 0) {
			while (x < xend) {
				x++;
				if (d < 0) {
					d += incr1;
				} else {
					y++;
					d += incr2;
				}
				dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
			}
		} else {
			while (x < xend) {
				x++;
				if (d < 0) {
					d += incr1;
				} else {
					y--;
					d += incr2;
				}
				dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
			}
		}
	} else {
		/* 2.0.12: Michael Schwartz: divide rather than multiply;
		   TBB: but watch out for /0! */
		double as = sin (atan2 (dy, dx));
		if (as != 0) {
			wid = thick / as;
		} else {
			wid = 1;
		}
		vert = 0;	/* stroke runs horizontally at each step */
		d = 2 * dx - dy;
		incr1 = 2 * dx;
		incr2 = 2 * (dx - dy);
		if (y1 > y2) {
			y = y2;
			x = x2;
			yend = y1;
			xdirflag = (-1);
		} else {
			y = y1;
			x = x1;
			yend = y2;
			xdirflag = 1;
		}
		dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
		if (((x2 - x1) * xdirflag) > 0) {
			while (y < yend) {
				y++;
				if (d < 0) {
					d += incr1;
				} else {
					x++;
					d += incr2;
				}
				dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
			}
		} else {
			while (y < yend) {
				y++;
				if (d < 0) {
					d += incr1;
				} else {
					x--;
					d += incr2;
				}
				dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
			}
		}
	}
}
/* One step of a dashed line: advance the dash counter, toggle the pen every
   gdDashSize pixels, and if the pen is down paint a stroke of width 'wid'
   (vertical when 'vert', horizontal otherwise) centred on (x, y). */
static void
dashedSet (gdImagePtr im, int x, int y, int color,
           int *onP, int *dashStepP, int wid, int vert)
{
	int step = *dashStepP + 1;
	int penDown = *onP;
	int w, start;

	if (step == gdDashSize) {
		step = 0;
		penDown = !penDown;
	}
	if (penDown) {
		if (vert) {
			start = y - wid / 2;
			for (w = start; w < start + wid; w++) {
				gdImageSetPixel (im, x, w, color);
			}
		} else {
			start = x - wid / 2;
			for (w = start; w < start + wid; w++) {
				gdImageSetPixel (im, w, y, color);
			}
		}
	}
	*dashStepP = step;
	*onP = penDown;
}
/*
Function: gdImageBoundsSafe
*/
BGD_DECLARE(int) gdImageBoundsSafe (gdImagePtr im, int x, int y)
{
	/* Function form of the bounds-check macro, for external callers. */
	return gdImageBoundsSafeMacro (im, x, y);
}
/**
* Function: gdImageChar
*
* Draws a single character.
*
* Parameters:
* im - The image to draw onto.
* f - The raster font.
* x - The x coordinate of the upper left pixel.
* y - The y coordinate of the upper left pixel.
* c - The character.
* color - The color.
*
* Variants:
* - <gdImageCharUp>
*
* See also:
* - <gdFontPtr>
*/
BGD_DECLARE(void) gdImageChar (gdImagePtr im, gdFontPtr f, int x, int y, int c, int color)
{
	int col, row;
	int base;

#ifdef CHARSET_EBCDIC
	c = ASC (c);
#endif /*CHARSET_EBCDIC */
	/* Ignore characters outside the font's covered range. */
	if (c < f->offset || c >= f->offset + f->nchars) {
		return;
	}
	/* Offset of this glyph's bitmap within the font data. */
	base = (c - f->offset) * f->h * f->w;
	for (row = 0; row < f->h; row++) {
		for (col = 0; col < f->w; col++) {
			/* Only set pixels for lit glyph cells. */
			if (f->data[base + row * f->w + col]) {
				gdImageSetPixel (im, x + col, y + row, color);
			}
		}
	}
}
/**
* Function: gdImageCharUp
*/
BGD_DECLARE(void) gdImageCharUp (gdImagePtr im, gdFontPtr f, int x, int y, int c, int color)
{
	int col, row;
	int base;

#ifdef CHARSET_EBCDIC
	c = ASC (c);
#endif /*CHARSET_EBCDIC */
	/* Ignore characters outside the font's covered range. */
	if (c < f->offset || c >= f->offset + f->nchars) {
		return;
	}
	base = (c - f->offset) * f->h * f->w;
	/* Glyph is rotated 90 degrees counter-clockwise: font rows run along
	   +x, font columns along -y. */
	for (col = 0; col < f->w; col++) {
		for (row = 0; row < f->h; row++) {
			if (f->data[base + row * f->w + col]) {
				gdImageSetPixel (im, x + row, y - col, color);
			}
		}
	}
}
/**
* Function: gdImageString
*
* Draws a character string.
*
* Parameters:
* im - The image to draw onto.
* f - The raster font.
* x - The x coordinate of the upper left pixel.
* y - The y coordinate of the upper left pixel.
* c - The character string.
* color - The color.
*
* Variants:
* - <gdImageStringUp>
* - <gdImageString16>
* - <gdImageStringUp16>
*
* See also:
* - <gdFontPtr>
* - <gdImageStringTTF>
*/
BGD_DECLARE(void) gdImageString (gdImagePtr im, gdFontPtr f,
                                 int x, int y, unsigned char *s, int color)
{
	int i;
	int n = strlen ((char *) s);
	/* One glyph per character, advancing one cell width each time. */
	for (i = 0; i < n; i++) {
		gdImageChar (im, f, x, y, s[i], color);
		x += f->w;
	}
}
/**
 * Function: gdImageStringUp
 *
 * Draws a character string rotated 90 degrees, advancing upwards
 * (towards smaller y) one character cell per glyph.
 */
BGD_DECLARE(void) gdImageStringUp (gdImagePtr im, gdFontPtr f,
				   int x, int y, unsigned char *s, int color)
{
	unsigned char *p;

	for (p = s; *p; p++) {
		gdImageCharUp (im, f, x, y, *p, color);
		y -= f->w;
	}
}
static int strlen16 (unsigned short *s);

/**
 * Function: gdImageString16
 *
 * Draws a string of 16-bit characters; otherwise identical to
 * <gdImageString>.
 */
BGD_DECLARE(void) gdImageString16 (gdImagePtr im, gdFontPtr f,
				   int x, int y, unsigned short *s, int color)
{
	unsigned short *p;

	for (p = s; *p; p++) {
		gdImageChar (im, f, x, y, *p, color);
		x += f->w;
	}
}
/**
 * Function: gdImageStringUp16
 *
 * Draws a string of 16-bit characters rotated 90 degrees; otherwise
 * identical to <gdImageStringUp>.
 */
BGD_DECLARE(void) gdImageStringUp16 (gdImagePtr im, gdFontPtr f,
				     int x, int y, unsigned short *s, int color)
{
	unsigned short *p;

	for (p = s; *p; p++) {
		gdImageCharUp (im, f, x, y, *p, color);
		y -= f->w;
	}
}
/* Length (in characters, excluding the terminator) of a zero-terminated
   array of 16-bit characters. */
static int
strlen16 (unsigned short *s)
{
	int n;

	for (n = 0; s[n] != 0; n++)
		;
	return n;
}
#ifndef HAVE_LSQRT
/* If you don't have a nice square root function for longs, you can use
** this hack
*/
long
lsqrt (long n)
{
long result = (long) sqrt ((double) n);
return result;
}
#endif
/* s and e are integers modulo 360 (degrees), with 0 degrees
   being the rightmost extreme and degrees changing clockwise.
   cx and cy are the center in pixels; w and h are the horizontal
   and vertical diameter in pixels. */
/*
	Function: gdImageArc

	Draws the outline of a partial ellipse; thin wrapper around
	<gdImageFilledArc> with the gdNoFill style.
*/
BGD_DECLARE(void) gdImageArc (gdImagePtr im, int cx, int cy, int w, int h, int s, int e,
			      int color)
{
	gdImageFilledArc (im, cx, cy, w, h, s, e, color, gdNoFill);
}
/*
	Function: gdImageFilledArc

	Draws a partial ellipse centered at (cx, cy) with horizontal diameter
	w and vertical diameter h.  s and e are start/end angles in degrees
	(0 at the rightmost point, increasing clockwise).  "style" is a
	bitwise OR of gdChord, gdNoFill and gdEdged selecting chord vs. pie,
	outline vs. fill.
*/
BGD_DECLARE(void) gdImageFilledArc (gdImagePtr im, int cx, int cy, int w, int h, int s, int e,
				    int color, int style)
{
	/* up to 361 one-degree arc points + center + re-inserted endpoints */
	gdPoint pts[363];
	int i, pti;		/* pti indexes the next free slot in pts */
	int lx = 0, ly = 0;	/* previous arc point */
	int fx = 0, fy = 0;	/* first arc point */
	int startx = -1, starty = -1, endx = -1, endy = -1;
	/* Normalize the angles: afterwards 0 <= s < 360 and s < e <= s+360;
	   equal angles mean a full ellipse. */
	if ((s % 360)  == (e % 360)) {
		s = 0;
		e = 360;
	} else {
		if (s > 360) {
			s = s % 360;
		}
		if (e > 360) {
			e = e % 360;
		}
		while (s < 0) {
			s += 360;
		}
		while (e < s) {
			e += 360;
		}
		if (s == e) {
			s = 0;
			e = 360;
		}
	}
	for (i = s, pti = 1; (i <= e); i++, pti++) {
		int x, y;
		/* gdCosT/gdSinT are trig tables scaled by 1024. */
		x = endx = ((long) gdCosT[i % 360] * (long) w / (2 * 1024)) + cx;
		y = endy = ((long) gdSinT[i % 360] * (long) h / (2 * 1024)) + cy;
		if (i != s) {
			if (!(style & gdChord)) {
				if (style & gdNoFill) {
					gdImageLine (im, lx, ly, x, y, color);
				} else {
					/* Collapse runs of points on the same scanline
					   so the polygon filler sees clean edges. */
					if (y == ly) {
						pti--; /* don't add this point */
						if (((i > 270 || i < 90) && x > lx) || ((i > 90 && i < 270) && x < lx)) {
							/* replace the old x coord, if increasing on the
							   right side or decreasing on the left side */
							pts[pti].x = x;
						}
					} else {
						pts[pti].x = x;
						pts[pti].y = y;
					}
				}
			}
		} else {
			fx = x;
			fy = y;
			if (!(style & (gdChord | gdNoFill))) {
				/* pie fill: the polygon starts at the center */
				pts[0].x = cx;
				pts[0].y = cy;
				pts[pti].x = startx = x;
				pts[pti].y = starty = y;
			}
		}
		lx = x;
		ly = y;
	}
	if (style & gdChord) {
		if (style & gdNoFill) {
			if (style & gdEdged) {
				gdImageLine (im, cx, cy, lx, ly, color);
				gdImageLine (im, cx, cy, fx, fy, color);
			}
			gdImageLine (im, fx, fy, lx, ly, color);
		} else {
			/* filled chord: triangle first point / last point / center */
			pts[0].x = fx;
			pts[0].y = fy;
			pts[1].x = lx;
			pts[1].y = ly;
			pts[2].x = cx;
			pts[2].y = cy;
			gdImageFilledPolygon (im, pts, 3, color);
		}
	} else {
		if (style & gdNoFill) {
			if (style & gdEdged) {
				gdImageLine (im, cx, cy, lx, ly, color);
				gdImageLine (im, cx, cy, fx, fy, color);
			}
		} else {
			if (e - s < 360) {
				if (pts[1].x != startx && pts[1].y == starty) {
					/* start point has been removed due to y-coord fix => insert it */
					for (i = pti; i > 1; i--) {
						pts[i].x = pts[i-1].x;
						pts[i].y = pts[i-1].y;
					}
					pts[1].x = startx;
					pts[1].y = starty;
					pti++;
				}
				if (pts[pti-1].x != endx && pts[pti-1].y == endy) {
					/* end point has been removed due to y-coord fix => insert it */
					pts[pti].x = endx;
					pts[pti].y = endy;
					pti++;
				}
			}
			/* close the pie slice at the center and fill */
			pts[pti].x = cx;
			pts[pti].y = cy;
			gdImageFilledPolygon(im, pts, pti+1, color);
		}
	}
}
/*
	Function: gdImageEllipse

	Draws the outline of an ellipse centered at (mx, my) with width w and
	height h in color c, using integer error-term stepping and plotting
	the four symmetric quadrant points on each step.
*/
BGD_DECLARE(void) gdImageEllipse(gdImagePtr im, int mx, int my, int w, int h, int c)
{
	int x=0,mx1=0,mx2=0,my1=0,my2=0;
	long aq,bq,dx,dy,r,rx,ry,a,b;
	/* semi-axes */
	a=w>>1;
	b=h>>1;
	/* the two extreme points on the horizontal axis */
	gdImageSetPixel(im,mx+a, my, c);
	gdImageSetPixel(im,mx-a, my, c);
	mx1 = mx-a;
	my1 = my;
	mx2 = mx+a;
	my2 = my;
	aq = a * a;
	bq = b * b;
	dx = aq << 1;
	dy = bq << 1;
	r = a * bq;		/* error term */
	rx = r << 1;
	ry = 0;
	x = a;
	/* walk x from a down to 0, stepping y via the error term r */
	while (x > 0) {
		if (r > 0) {
			my1++;
			my2--;
			ry +=dx;
			r -=ry;
		}
		if (r <= 0) {
			x--;
			mx1++;
			mx2--;
			rx -=dy;
			r +=rx;
		}
		/* mirror the current point into all four quadrants */
		gdImageSetPixel(im,mx1, my1, c);
		gdImageSetPixel(im,mx1, my2, c);
		gdImageSetPixel(im,mx2, my1, c);
		gdImageSetPixel(im,mx2, my2, c);
	}
}
/*
	Function: gdImageFilledEllipse

	Draws a filled ellipse centered at (mx, my) with width w and height h
	in color c.  Same stepping scheme as <gdImageEllipse>, but each new
	pair of scanlines is filled with a horizontal run; old_y2 guards
	against drawing the same scanline pair twice.
*/
BGD_DECLARE(void) gdImageFilledEllipse (gdImagePtr im, int mx, int my, int w, int h, int c)
{
	int x=0,mx1=0,mx2=0,my1=0,my2=0;
	long aq,bq,dx,dy,r,rx,ry,a,b;
	int i;
	int old_y2;
	/* semi-axes */
	a=w>>1;
	b=h>>1;
	/* the central scanline */
	for (x = mx-a; x <= mx+a; x++) {
		gdImageSetPixel(im, x, my, c);
	}
	mx1 = mx-a;
	my1 = my;
	mx2 = mx+a;
	my2 = my;
	aq = a * a;
	bq = b * b;
	dx = aq << 1;
	dy = bq << 1;
	r = a * bq;		/* error term */
	rx = r << 1;
	ry = 0;
	x = a;
	old_y2=-2;		/* sentinel: no scanline drawn yet */
	while (x > 0) {
		if (r > 0) {
			my1++;
			my2--;
			ry +=dx;
			r -=ry;
		}
		if (r <= 0) {
			x--;
			mx1++;
			mx2--;
			rx -=dy;
			r +=rx;
		}
		/* fill the mirrored scanline pair, but only once per y */
		if(old_y2!=my2) {
			for(i=mx1; i<=mx2; i++) {
				gdImageSetPixel(im,i,my2,c);
				gdImageSetPixel(im,i,my1,c);
			}
		}
		old_y2 = my2;
	}
}
/*
	Function: gdImageFillToBorder

	Flood-fills with "color" starting at (x, y), stopping at pixels of
	the "border" color.  Fills the seed's scanline left and right, then
	recurses into the lines above and below.  Alpha blending is switched
	off for the duration and restored before returning.
*/
BGD_DECLARE(void) gdImageFillToBorder (gdImagePtr im, int x, int y, int border, int color)
{
	int lastBorder;
	/* Seek left */
	int leftLimit, rightLimit;
	int i;
	int restoreAlphaBleding;
	if (border < 0 || color < 0) {
		/* Refuse to fill to a non-solid border */
		return;
	}
	if (!im->trueColor) {
		/* palette image: both colors must be valid palette indices */
		if (color > (im->colorsTotal - 1) || border > (im->colorsTotal - 1)) {
			return;
		}
	}
	leftLimit = (-1);
	restoreAlphaBleding = im->alphaBlendingFlag;
	im->alphaBlendingFlag = 0;
	/* clamp the seed into the image */
	if (x >= im->sx) {
		x = im->sx - 1;
	} else if (x < 0) {
		x = 0;
	}
	if (y >= im->sy) {
		y = im->sy - 1;
	} else if (y < 0) {
		y = 0;
	}
	/* fill leftwards from the seed until the border color is hit */
	for (i = x; (i >= 0); i--) {
		if (gdImageGetPixel (im, i, y) == border) {
			break;
		}
		gdImageSetPixel (im, i, y, color);
		leftLimit = i;
	}
	if (leftLimit == (-1)) {
		/* the seed itself sits on the border: nothing to fill */
		im->alphaBlendingFlag = restoreAlphaBleding;
		return;
	}
	/* Seek right */
	rightLimit = x;
	for (i = (x + 1); (i < im->sx); i++) {
		if (gdImageGetPixel (im, i, y) == border) {
			break;
		}
		gdImageSetPixel (im, i, y, color);
		rightLimit = i;
	}
	/* Look at lines above and below and start paints */
	/* Above */
	if (y > 0) {
		lastBorder = 1;
		for (i = leftLimit; (i <= rightLimit); i++) {
			int c;
			c = gdImageGetPixel (im, i, y - 1);
			if (lastBorder) {
				/* recurse once per run of fillable pixels */
				if ((c != border) && (c != color)) {
					gdImageFillToBorder (im, i, y - 1, border, color);
					lastBorder = 0;
				}
			} else if ((c == border) || (c == color)) {
				lastBorder = 1;
			}
		}
	}
	/* Below */
	if (y < ((im->sy) - 1)) {
		lastBorder = 1;
		for (i = leftLimit; (i <= rightLimit); i++) {
			int c = gdImageGetPixel (im, i, y + 1);
			if (lastBorder) {
				if ((c != border) && (c != color)) {
					gdImageFillToBorder (im, i, y + 1, border, color);
					lastBorder = 0;
				}
			} else if ((c == border) || (c == color)) {
				lastBorder = 1;
			}
		}
	}
	im->alphaBlendingFlag = restoreAlphaBleding;
}
/*
 * set the pixel at (x,y) and its 4-connected neighbors
 * with the same pixel value to the new pixel value nc (new color).
 * A 4-connected neighbor: pixel above, below, left, or right of a pixel.
 * ideas from comp.graphics discussions.
 * For tiled fill, the use of a flag buffer is mandatory. As the tile image can
 * contain the same color as the color to fill. To do not bloat normal filling
 * code I added a 2nd private function.
 */
/* Resolve the tile pixel corresponding to image position (x, y) — the tile
   repeats with period gdImageSX/SY(im->tile) — to a color value usable in
   im, converting between truecolor and palette representations as needed.
   Returns -1 if no tile is set. */
static int gdImageTileGet (gdImagePtr im, int x, int y)
{
	int srcx, srcy;
	int tileColor,p;
	if (!im->tile) {
		return -1;
	}
	srcx = x % gdImageSX(im->tile);
	srcy = y % gdImageSY(im->tile);
	p = gdImageGetPixel(im->tile, srcx, srcy);
	if (p == im->tile->transparent) {
		/* map the tile's transparent color onto the image's */
		tileColor = im->transparent;
	} else if (im->trueColor) {
		if (im->tile->trueColor) {
			tileColor = p;
		} else {
			/* palette tile -> truecolor image */
			tileColor = gdTrueColorAlpha( gdImageRed(im->tile,p), gdImageGreen(im->tile,p), gdImageBlue (im->tile,p), gdImageAlpha (im->tile,p));
		}
	} else {
		/* palette destination: resolve into im's palette */
		if (im->tile->trueColor) {
			tileColor = gdImageColorResolveAlpha(im, gdTrueColorGetRed (p), gdTrueColorGetGreen (p), gdTrueColorGetBlue (p), gdTrueColorGetAlpha (p));
		} else {
			tileColor = gdImageColorResolveAlpha(im, gdImageRed (im->tile,p), gdImageGreen (im->tile,p), gdImageBlue (im->tile,p), gdImageAlpha (im->tile,p));
		}
	}
	return tileColor;
}
/* horizontal segment of scan line y */
struct seg {
	/* scanline y, span [xl, xr], and the direction (dy = +-1) the parent
	   span came from */
	int y, xl, xr, dy;
};
/* max depth of stack */
#define FILL_MAX ((int)(im->sy*im->sx)/4)
/* push a span if the stack has room and the target line is inside the image */
#define FILL_PUSH(Y, XL, XR, DY) \
	if (sp<stack+FILL_MAX && Y+(DY)>=0 && Y+(DY)<wy2) \
	{sp->y = Y; sp->xl = XL; sp->xr = XR; sp->dy = DY; sp++;}
/* pop a span; Y comes back already advanced by its dy */
#define FILL_POP(Y, XL, XR, DY) \
	{sp--; Y = sp->y+(DY = sp->dy); XL = sp->xl; XR = sp->xr;}
static void _gdImageFillTiled(gdImagePtr im, int x, int y, int nc);
/*
	Function: gdImageFill

	Flood-fills the 4-connected region around (x, y) that shares the
	seed pixel's color with the new color nc.  nc == gdTiled delegates
	to the tiled filler.  Alpha blending is temporarily switched off so
	raw color values are written; every exit path must restore the
	caller's alphaBlendingFlag — that is why the error paths below jump
	to "done" instead of returning directly (previously the overflow
	checks and the allocation-failure path returned with the flag still
	cleared).
*/
BGD_DECLARE(void) gdImageFill(gdImagePtr im, int x, int y, int nc)
{
	int l, x1, x2, dy;
	int oc;   /* old pixel value */
	int wx2,wy2;
	int alphablending_bak;
	/* stack of filled segments */
	/* struct seg stack[FILL_MAX],*sp = stack; */
	struct seg *stack;
	struct seg *sp;
	if (!im->trueColor && nc > (im->colorsTotal - 1)) {
		return;
	}
	alphablending_bak = im->alphaBlendingFlag;
	im->alphaBlendingFlag = 0;
	if (nc==gdTiled) {
		_gdImageFillTiled(im,x,y,nc);
		im->alphaBlendingFlag = alphablending_bak;
		return;
	}
	wx2=im->sx;
	wy2=im->sy;
	oc = gdImageGetPixel(im, x, y);
	/* NOTE(review): the seed test uses x>wx2 / y>wy2 with wx2 == im->sx,
	   so x == im->sx slips through; the pixel accessors bounds-check, so
	   this appears harmless — confirm before tightening. */
	if (oc==nc || x<0 || x>wx2 || y<0 || y>wy2) {
		goto done;
	}
	/* Do not use the 4 neighbors implementation with
	 * small images
	 */
	if (im->sx < 4) {
		int ix = x, iy = y, c;
		do {
			do {
				c = gdImageGetPixel(im, ix, iy);
				if (c != oc) {
					goto done;
				}
				gdImageSetPixel(im, ix, iy, nc);
			} while(ix++ < (im->sx -1));
			ix = x;
		} while(iy++ < (im->sy -1));
		goto done;
	}
	if(overflow2(im->sy, im->sx)) {
		/* BUGFIX: restore the alpha blending flag on error exit */
		goto done;
	}
	if(overflow2(sizeof(struct seg), ((im->sy * im->sx) / 4))) {
		goto done;
	}
	stack = (struct seg *)gdMalloc(sizeof(struct seg) * ((int)(im->sy*im->sx)/4));
	if (!stack) {
		goto done;
	}
	sp = stack;
	/* required! */
	FILL_PUSH(y,x,x,1);
	/* seed segment (popped 1st) */
	FILL_PUSH(y+1, x, x, -1);
	while (sp>stack) {
		FILL_POP(y, x1, x2, dy);
		/* extend the span leftwards from x1 */
		for (x=x1; x>=0 && gdImageGetPixel(im,x, y)==oc; x--) {
			gdImageSetPixel(im,x, y, nc);
		}
		if (x>=x1) {
			goto skip;
		}
		l = x+1;
		/* leak on left? */
		if (l<x1) {
			FILL_PUSH(y, l, x1-1, -dy);
		}
		x = x1+1;
		do {
			/* extend the span rightwards */
			for (; x<=wx2 && gdImageGetPixel(im,x, y)==oc; x++) {
				gdImageSetPixel(im, x, y, nc);
			}
			FILL_PUSH(y, l, x-1, dy);
			/* leak on right? */
			if (x>x2+1) {
				FILL_PUSH(y, x2+1, x-1, -dy);
			}
skip:
			for (x++; x<=x2 && (gdImageGetPixel(im, x, y)!=oc); x++);
			l = x;
		} while (x<=x2);
	}
	gdFree(stack);
done:
	im->alphaBlendingFlag = alphablending_bak;
}
/* Tile-pattern flood fill.  Like gdImageFill, but each filled pixel takes
   its color from the tile image, so a "visited" flag buffer (pts) is
   required: the tile may contain the very color being filled over.  pts is
   addressed column-major as pts[y + x*wy2] (wy2 == im->sy), covering all
   sx*sy pixels. */
static void _gdImageFillTiled(gdImagePtr im, int x, int y, int nc)
{
	int l, x1, x2, dy;
	int oc;   /* old pixel value */
	int wx2,wy2;
	/* stack of filled segments */
	struct seg *stack;
	struct seg *sp;
	char *pts;
	if (!im->tile) {
		return;
	}
	wx2=im->sx;
	wy2=im->sy;
	/* guard the sy*sx products below against integer overflow */
	if(overflow2(im->sy, im->sx)) {
		return;
	}
	if(overflow2(sizeof(struct seg), ((im->sy * im->sx) / 4))) {
		return;
	}
	pts = (char *) gdCalloc(im->sy * im->sx, sizeof(char));
	if (!pts) {
		return;
	}
	stack = (struct seg *)gdMalloc(sizeof(struct seg) * ((int)(im->sy*im->sx)/4));
	if (!stack) {
		gdFree(pts);
		return;
	}
	sp = stack;
	oc = gdImageGetPixel(im, x, y);
	/* required! */
	FILL_PUSH(y,x,x,1);
	/* seed segment (popped 1st) */
	FILL_PUSH(y+1, x, x, -1);
	while (sp>stack) {
		FILL_POP(y, x1, x2, dy);
		/* extend leftwards over unvisited pixels of the old color */
		for (x=x1; x>=0 && (!pts[y + x*wy2] && gdImageGetPixel(im,x,y)==oc); x--) {
			nc = gdImageTileGet(im,x,y);
			pts[y + x*wy2]=1;
			gdImageSetPixel(im,x, y, nc);
		}
		if (x>=x1) {
			goto skip;
		}
		l = x+1;
		/* leak on left? */
		if (l<x1) {
			FILL_PUSH(y, l, x1-1, -dy);
		}
		x = x1+1;
		do {
			/* extend rightwards */
			for (; x<wx2 && (!pts[y + x*wy2] && gdImageGetPixel(im,x, y)==oc) ; x++) {
				if (pts[y + x*wy2]) {
					/* we should never be here */
					break;
				}
				nc = gdImageTileGet(im,x,y);
				pts[y + x*wy2]=1;
				gdImageSetPixel(im, x, y, nc);
			}
			FILL_PUSH(y, l, x-1, dy);
			/* leak on right? */
			if (x>x2+1) {
				FILL_PUSH(y, x2+1, x-1, -dy);
			}
skip:
			for (x++; x<=x2 && (pts[y + x*wy2] || gdImageGetPixel(im,x, y)!=oc); x++);
			l = x;
		} while (x<=x2);
	}
	gdFree(pts);
	gdFree(stack);
}
/**
 * Function: gdImageRectangle
 *
 * Draws a rectangle.
 *
 * Parameters:
 *   im    - The image.
 *   x1    - The x-coordinate of one of the corners.
 *   y1    - The y-coordinate of one of the corners.
 *   x2    - The x-coordinate of another corner.
 *   y2    - The y-coordinate of another corner.
 *   color - The color.
 *
 * See also:
 *   - <gdImageFilledRectangle>
 */
BGD_DECLARE(void) gdImageRectangle (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
	int thick = im->thick;
	/* degenerate case: a single pixel */
	if (x1 == x2 && y1 == y2 && thick == 1) {
		gdImageSetPixel(im, x1, y1, color);
		return;
	}
	/* normalize corner order */
	if (y2 < y1) {
		int t = y1;
		y1 = y2;
		y2 = t;
	}
	if (x2 < x1) {
		int t = x1;
		x1 = x2;
		x2 = t;
	}
	if (thick > 1) {
		/* thick border: draw four filled bars centered on the edges */
		int cx, cy, x1ul, y1ul, x2lr, y2lr;
		int half = thick >> 1;
		/* outer bounding box of the thick outline */
		x1ul = x1 - half;
		y1ul = y1 - half;
		x2lr = x2 + half;
		y2lr = y2 + half;
		/* top bar */
		cy = y1ul + thick;
		while (cy-- > y1ul) {
			cx = x1ul - 1;
			while (cx++ < x2lr) {
				gdImageSetPixel(im, cx, cy, color);
			}
		}
		/* bottom bar */
		cy = y2lr - thick;
		while (cy++ < y2lr) {
			cx = x1ul - 1;
			while (cx++ < x2lr) {
				gdImageSetPixel(im, cx, cy, color);
			}
		}
		/* left bar (between the horizontal bars) */
		cy = y1ul + thick - 1;
		while (cy++ < y2lr -thick) {
			cx = x1ul - 1;
			while (cx++ < x1ul + thick) {
				gdImageSetPixel(im, cx, cy, color);
			}
		}
		/* right bar */
		cy = y1ul + thick - 1;
		while (cy++ < y2lr -thick) {
			cx = x2lr - thick - 1;
			while (cx++ < x2lr) {
				gdImageSetPixel(im, cx, cy, color);
			}
		}
		return;
	} else {
		/* 1-pixel outline: a line for degenerate rectangles, otherwise
		   four edges (verticals shortened to avoid re-drawing corners) */
		if (x1 == x2 || y1 == y2) {
			gdImageLine(im, x1, y1, x2, y2, color);
		} else {
			gdImageLine(im, x1, y1, x2, y1, color);
			gdImageLine(im, x1, y2, x2, y2, color);
			gdImageLine(im, x1, y1 + 1, x1, y2 - 1, color);
			gdImageLine(im, x2, y1 + 1, x2, y2 - 1, color);
		}
	}
}
/* Fill the rectangle spanned by (x1, y1)-(x2, y2), iterating column by
   column (x outer, y inner).  Corners may be given in any order; the
   rectangle is clipped to the image bounds. */
static void _gdImageFilledHRectangle (gdImagePtr im, int x1, int y1, int x2, int y2,
				      int color)
{
	int px, py, tmp;

	/* degenerate rectangle: a single pixel */
	if (x1 == x2 && y1 == y2) {
		gdImageSetPixel (im, x1, y1, color);
		return;
	}

	/* normalize corner order */
	if (x2 < x1) {
		tmp = x1;
		x1 = x2;
		x2 = tmp;
	}
	if (y2 < y1) {
		tmp = y1;
		y1 = y2;
		y2 = tmp;
	}

	/* clip to the image */
	if (x1 < 0) {
		x1 = 0;
	}
	if (x2 >= gdImageSX(im)) {
		x2 = gdImageSX(im) - 1;
	}
	if (y1 < 0) {
		y1 = 0;
	}
	if (y2 >= gdImageSY(im)) {
		y2 = gdImageSY(im) - 1;
	}

	for (px = x1; px <= x2; px++) {
		for (py = y1; py <= y2; py++) {
			gdImageSetPixel (im, px, py, color);
		}
	}
}
/* Fill the rectangle spanned by (x1, y1)-(x2, y2), iterating row by row
   (y outer, x inner).  Corners may be given in any order; the rectangle
   is clipped to the image bounds. */
static void _gdImageFilledVRectangle (gdImagePtr im, int x1, int y1, int x2, int y2,
				      int color)
{
	int px, py, tmp;

	/* degenerate rectangle: a single pixel */
	if (x1 == x2 && y1 == y2) {
		gdImageSetPixel (im, x1, y1, color);
		return;
	}

	/* normalize corner order */
	if (x2 < x1) {
		tmp = x1;
		x1 = x2;
		x2 = tmp;
	}
	if (y2 < y1) {
		tmp = y1;
		y1 = y2;
		y2 = tmp;
	}

	/* clip to the image */
	if (x1 < 0) {
		x1 = 0;
	}
	if (x2 >= gdImageSX(im)) {
		x2 = gdImageSX(im) - 1;
	}
	if (y1 < 0) {
		y1 = 0;
	}
	if (y2 >= gdImageSY(im)) {
		y2 = gdImageSY(im) - 1;
	}

	for (py = y1; py <= y2; py++) {
		for (px = x1; px <= x2; px++) {
			gdImageSetPixel (im, px, py, color);
		}
	}
}
/*
	Function: gdImageFilledRectangle

	Draws a filled rectangle with corners (x1, y1) and (x2, y2);
	delegates to the row-major helper.
*/
BGD_DECLARE(void) gdImageFilledRectangle (gdImagePtr im, int x1, int y1, int x2, int y2,
					  int color)
{
	_gdImageFilledVRectangle(im, x1, y1, x2, y2, color);
}
/**
 * Group: Cloning and Copying
 */

/**
 * Function: gdImageClone
 *
 * Clones an image
 *
 * Creates an exact duplicate of the given image.
 *
 * Parameters:
 *   src - The source image.
 *
 * Returns:
 *   The cloned image on success, NULL on failure.
 */
BGD_DECLARE(gdImagePtr) gdImageClone (gdImagePtr src) {
	gdImagePtr dst;
	register int i, x;

	if (src->trueColor) {
		dst = gdImageCreateTrueColor(src->sx , src->sy);
	} else {
		dst = gdImageCreate(src->sx , src->sy);
	}
	if (dst == NULL) {
		return NULL;
	}

	/* Copy the palette (palette images only) and the pixel data. */
	if (src->trueColor == 0) {
		dst->colorsTotal = src->colorsTotal;
		for (i = 0; i < gdMaxColors; i++) {
			dst->red[i] = src->red[i];
			dst->green[i] = src->green[i];
			dst->blue[i] = src->blue[i];
			dst->alpha[i] = src->alpha[i];
			dst->open[i] = src->open[i];
		}
		for (i = 0; i < src->sy; i++) {
			for (x = 0; x < src->sx; x++) {
				dst->pixels[i][x] = src->pixels[i][x];
			}
		}
	} else {
		for (i = 0; i < src->sy; i++) {
			for (x = 0; x < src->sx; x++) {
				dst->tpixels[i][x] = src->tpixels[i][x];
			}
		}
	}

	/* BUGFIX: the old code copied src->style into dst->style at this
	   point, but a freshly created image has no style buffer, so that
	   wrote through an invalid pointer.  The style is now cloned below
	   via gdImageSetStyle, which allocates the buffer; only stylePos
	   needs copying afterwards. */

	dst->interlace = src->interlace;
	dst->alphaBlendingFlag = src->alphaBlendingFlag;
	dst->saveAlphaFlag = src->saveAlphaFlag;
	dst->AA = src->AA;
	dst->AA_color = src->AA_color;
	dst->AA_dont_blend = src->AA_dont_blend;
	dst->cx1 = src->cx1;
	dst->cy1 = src->cy1;
	dst->cx2 = src->cx2;
	dst->cy2 = src->cy2;
	dst->res_x = src->res_x;
	dst->res_y = src->res_y;
	dst->paletteQuantizationMethod = src->paletteQuantizationMethod;
	dst->paletteQuantizationSpeed = src->paletteQuantizationSpeed;
	dst->paletteQuantizationMinQuality = src->paletteQuantizationMinQuality;
	/* BUGFIX: was a duplicated MinQuality assignment; MaxQuality was
	   never copied. */
	dst->paletteQuantizationMaxQuality = src->paletteQuantizationMaxQuality;
	dst->interpolation_id = src->interpolation_id;
	dst->interpolation = src->interpolation;

	if (src->brush) {
		dst->brush = gdImageClone(src->brush);
	}
	if (src->tile) {
		dst->tile = gdImageClone(src->tile);
	}
	if (src->style) {
		gdImageSetStyle(dst, src->style, src->styleLength);
		dst->stylePos = src->stylePos;
	}
	for (i = 0; i < gdMaxColors; i++) {
		dst->brushColorMap[i] = src->brushColorMap[i];
		dst->tileColorMap[i] = src->tileColorMap[i];
	}

	/* BUGFIX: dst->polyInts starts out unallocated; allocate it before
	   copying the scratch polygon data instead of writing through the
	   uninitialized pointer. */
	if (src->polyAllocated > 0 && !overflow2(sizeof(int), src->polyAllocated)) {
		dst->polyInts = gdMalloc(sizeof(int) * src->polyAllocated);
		if (dst->polyInts != NULL) {
			dst->polyAllocated = src->polyAllocated;
			for (i = 0; i < src->polyAllocated; i++) {
				dst->polyInts[i] = src->polyInts[i];
			}
		}
	}

	return dst;
}
/**
 * Function: gdImageCopy
 *
 * Copy an area of an image to another image
 *
 * Parameters:
 *   dst  - The destination image.
 *   src  - The source image.
 *   dstX - The x-coordinate of the upper left corner to copy to.
 *   dstY - The y-coordinate of the upper left corner to copy to.
 *   srcX - The x-coordinate of the upper left corner to copy from.
 *   srcY - The y-coordinate of the upper left corner to copy from.
 *   w    - The width of the area to copy.
 *   h    - The height of the area to copy.
 *
 * See also:
 *   - <gdImageCopyMerge>
 *   - <gdImageCopyMergeGray>
 */
BGD_DECLARE(void) gdImageCopy (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX,
			       int srcY, int w, int h)
{
	int c;
	int x, y;
	int tox, toy;
	int i;
	/* cache of palette-index -> destination-color mappings */
	int colorMap[gdMaxColors];
	if (dst->trueColor) {
		/* 2.0: much easier when the destination is truecolor. */
		/* 2.0.10: needs a transparent-index check that is still valid if
		 * * the source is not truecolor. Thanks to Frank Warmerdam.
		 */
		if (src->trueColor) {
			for (y = 0; (y < h); y++) {
				for (x = 0; (x < w); x++) {
					int c = gdImageGetTrueColorPixel (src, srcX + x, srcY + y);
					if (c != src->transparent) {
						gdImageSetPixel (dst, dstX + x, dstY + y, c);
					}
				}
			}
		} else {
			/* source is palette based */
			for (y = 0; (y < h); y++) {
				for (x = 0; (x < w); x++) {
					int c = gdImageGetPixel (src, srcX + x, srcY + y);
					if (c != src->transparent) {
						gdImageSetPixel(dst, dstX + x, dstY + y, gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]));
					}
				}
			}
		}
		return;
	}
	/* palette destination: colors must be mapped; -1 marks "not yet mapped" */
	for (i = 0; (i < gdMaxColors); i++) {
		colorMap[i] = (-1);
	}
	toy = dstY;
	for (y = srcY; (y < (srcY + h)); y++) {
		tox = dstX;
		for (x = srcX; (x < (srcX + w)); x++) {
			int nc;
			int mapTo;
			c = gdImageGetPixel (src, x, y);
			/* Added 7/24/95: support transparent copies */
			if (gdImageGetTransparent (src) == c) {
				tox++;
				continue;
			}
			/* Have we established a mapping for this color? */
			if (src->trueColor) {
				/* 2.05: remap to the palette available in the
				   destination image. This is slow and
				   works badly, but it beats crashing! Thanks
				   to Padhrig McCarthy. */
				mapTo = gdImageColorResolveAlpha (dst,
								  gdTrueColorGetRed (c),
								  gdTrueColorGetGreen (c),
								  gdTrueColorGetBlue (c),
								  gdTrueColorGetAlpha (c));
			} else if (colorMap[c] == (-1)) {
				/* If it's the same image, mapping is trivial */
				if (dst == src) {
					nc = c;
				} else {
					/* Get best match possible. This
					   function never returns error. */
					nc = gdImageColorResolveAlpha (dst,
								       src->red[c], src->green[c],
								       src->blue[c], src->alpha[c]);
				}
				colorMap[c] = nc;
				mapTo = colorMap[c];
			} else {
				mapTo = colorMap[c];
			}
			gdImageSetPixel (dst, tox, toy, mapTo);
			tox++;
		}
		toy++;
	}
}
/**
 * Function: gdImageCopyMerge
 *
 * Copy an area of an image to another image ignoring alpha
 *
 * The source area will be copied to the destination are by merging the pixels.
 *
 * Note:
 *   This function is a substitute for real alpha channel operations,
 *   so it doesn't pay attention to the alpha channel.
 *
 * Parameters:
 *   dst  - The destination image.
 *   src  - The source image.
 *   dstX - The x-coordinate of the upper left corner to copy to.
 *   dstY - The y-coordinate of the upper left corner to copy to.
 *   srcX - The x-coordinate of the upper left corner to copy from.
 *   srcY - The y-coordinate of the upper left corner to copy from.
 *   w    - The width of the area to copy.
 *   h    - The height of the area to copy.
 *   pct  - The percentage in range 0..100.
 *
 * See also:
 *   - <gdImageCopy>
 *   - <gdImageCopyMergeGray>
 */
BGD_DECLARE(void) gdImageCopyMerge (gdImagePtr dst, gdImagePtr src, int dstX, int dstY,
				    int srcX, int srcY, int w, int h, int pct)
{
	int c, dc;
	int x, y;
	int tox, toy;
	int ncR, ncG, ncB;
	toy = dstY;
	for (y = srcY; (y < (srcY + h)); y++) {
		tox = dstX;
		for (x = srcX; (x < (srcX + w)); x++) {
			int nc;
			c = gdImageGetPixel (src, x, y);
			/* Added 7/24/95: support transparent copies */
			if (gdImageGetTransparent (src) == c) {
				tox++;
				continue;
			}
			/* If it's the same image, mapping is trivial */
			if (dst == src) {
				nc = c;
			} else {
				dc = gdImageGetPixel (dst, tox, toy);
				/* blend each channel: pct% source, (100-pct)% destination */
				ncR = gdImageRed (src, c) * (pct / 100.0)
				      + gdImageRed (dst, dc) * ((100 - pct) / 100.0);
				ncG = gdImageGreen (src, c) * (pct / 100.0)
				      + gdImageGreen (dst, dc) * ((100 - pct) / 100.0);
				ncB = gdImageBlue (src, c) * (pct / 100.0)
				      + gdImageBlue (dst, dc) * ((100 - pct) / 100.0);
				/* Find a reasonable color */
				nc = gdImageColorResolve (dst, ncR, ncG, ncB);
			}
			gdImageSetPixel (dst, tox, toy, nc);
			tox++;
		}
		toy++;
	}
}
/**
 * Function: gdImageCopyMergeGray
 *
 * Copy an area of an image to another image ignoring alpha
 *
 * The source area will be copied to the grayscaled destination area by merging
 * the pixels.
 *
 * Note:
 *   This function is a substitute for real alpha channel operations,
 *   so it doesn't pay attention to the alpha channel.
 *
 * Parameters:
 *   dst  - The destination image.
 *   src  - The source image.
 *   dstX - The x-coordinate of the upper left corner to copy to.
 *   dstY - The y-coordinate of the upper left corner to copy to.
 *   srcX - The x-coordinate of the upper left corner to copy from.
 *   srcY - The y-coordinate of the upper left corner to copy from.
 *   w    - The width of the area to copy.
 *   h    - The height of the area to copy.
 *   pct  - The percentage of the source color intensity in range 0..100.
 *
 * See also:
 *   - <gdImageCopy>
 *   - <gdImageCopyMerge>
 */
BGD_DECLARE(void) gdImageCopyMergeGray (gdImagePtr dst, gdImagePtr src, int dstX, int dstY,
					int srcX, int srcY, int w, int h, int pct)
{
	int c, dc;
	int x, y;
	int tox, toy;
	int ncR, ncG, ncB;
	float g;	/* luminance of the destination pixel */
	toy = dstY;
	for (y = srcY; (y < (srcY + h)); y++) {
		tox = dstX;
		for (x = srcX; (x < (srcX + w)); x++) {
			int nc;
			c = gdImageGetPixel (src, x, y);
			/* Added 7/24/95: support transparent copies */
			if (gdImageGetTransparent (src) == c) {
				tox++;
				continue;
			}
			/*
			 * If it's the same image, mapping is NOT trivial since we
			 * merge with greyscale target, but if pct is 100, the grey
			 * value is not used, so it becomes trivial. pjw 2.0.12.
			 */
			if (dst == src && pct == 100) {
				nc = c;
			} else {
				dc = gdImageGetPixel (dst, tox, toy);
				/* ITU-R 601 luma weights for the grayscale component */
				g = 0.29900 * gdImageRed(dst, dc)
				    + 0.58700 * gdImageGreen(dst, dc) + 0.11400 * gdImageBlue(dst, dc);
				ncR = gdImageRed (src, c) * (pct / 100.0)
				      + g * ((100 - pct) / 100.0);
				ncG = gdImageGreen (src, c) * (pct / 100.0)
				      + g * ((100 - pct) / 100.0);
				ncB = gdImageBlue (src, c) * (pct / 100.0)
				      + g * ((100 - pct) / 100.0);
				/* First look for an exact match */
				nc = gdImageColorExact (dst, ncR, ncG, ncB);
				if (nc == (-1)) {
					/* No, so try to allocate it */
					nc = gdImageColorAllocate (dst, ncR, ncG, ncB);
					/* If we're out of colors, go for the
					   closest color */
					if (nc == (-1)) {
						nc = gdImageColorClosest (dst, ncR, ncG, ncB);
					}
				}
			}
			gdImageSetPixel (dst, tox, toy, nc);
			tox++;
		}
		toy++;
	}
}
/**
 * Function: gdImageCopyResized
 *
 * Copy a resized area from an image to another image
 *
 * If the source and destination area differ in size, the area will be resized
 * using nearest-neighbor interpolation.
 *
 * Parameters:
 *   dst  - The destination image.
 *   src  - The source image.
 *   dstX - The x-coordinate of the upper left corner to copy to.
 *   dstY - The y-coordinate of the upper left corner to copy to.
 *   srcX - The x-coordinate of the upper left corner to copy from.
 *   srcY - The y-coordinate of the upper left corner to copy from.
 *   dstW - The width of the area to copy to.
 *   dstH - The height of the area to copy to.
 *   srcW - The width of the area to copy from.
 *   srcH - The height of the area to copy from.
 *
 * See also:
 *   - <gdImageCopyResampled>
 *   - <gdImageScale>
 */
BGD_DECLARE(void) gdImageCopyResized (gdImagePtr dst, gdImagePtr src, int dstX, int dstY,
				      int srcX, int srcY, int dstW, int dstH, int srcW,
				      int srcH)
{
	int c;
	int x, y;
	int tox, toy;
	int ydest;
	int i;
	int colorMap[gdMaxColors];
	/* Stretch vectors */
	/* stx[i]/sty[i] = how many destination pixels source column/row i
	   expands (or shrinks) to */
	int *stx;
	int *sty;
	/* We only need to use floating point to determine the correct
	   stretch vector for one line's worth. */
	/* guard the sizeof(int) * srcW/srcH allocations below */
	if (overflow2(sizeof (int), srcW)) {
		return;
	}
	if (overflow2(sizeof (int), srcH)) {
		return;
	}
	stx = (int *) gdMalloc (sizeof (int) * srcW);
	if (!stx) {
		return;
	}
	sty = (int *) gdMalloc (sizeof (int) * srcH);
	if (!sty) {
		gdFree(stx);
		return;
	}
	/* Fixed by Mao Morimoto 2.0.16 */
	for (i = 0; (i < srcW); i++) {
		stx[i] = dstW * (i + 1) / srcW - dstW * i / srcW;
	}
	for (i = 0; (i < srcH); i++) {
		sty[i] = dstH * (i + 1) / srcH - dstH * i / srcH;
	}
	/* palette-mapping cache; -1 marks "not yet mapped" */
	for (i = 0; (i < gdMaxColors); i++) {
		colorMap[i] = (-1);
	}
	toy = dstY;
	for (y = srcY; (y < (srcY + srcH)); y++) {
		/* each source row is replicated sty[y - srcY] times */
		for (ydest = 0; (ydest < sty[y - srcY]); ydest++) {
			tox = dstX;
			for (x = srcX; (x < (srcX + srcW)); x++) {
				int nc = 0;
				int mapTo;
				/* source column shrunk away entirely */
				if (!stx[x - srcX]) {
					continue;
				}
				if (dst->trueColor) {
					/* 2.0.9: Thorben Kundinger: Maybe the source image is not
					   a truecolor image */
					if (!src->trueColor) {
						int tmp = gdImageGetPixel (src, x, y);
						mapTo = gdImageGetTrueColorPixel (src, x, y);
						if (gdImageGetTransparent (src) == tmp) {
							/* 2.0.21, TK: not tox++ */
							tox += stx[x - srcX];
							continue;
						}
					} else {
						/* TK: old code follows */
						mapTo = gdImageGetTrueColorPixel (src, x, y);
						/* Added 7/24/95: support transparent copies */
						if (gdImageGetTransparent (src) == mapTo) {
							/* 2.0.21, TK: not tox++ */
							tox += stx[x - srcX];
							continue;
						}
					}
				} else {
					c = gdImageGetPixel (src, x, y);
					/* Added 7/24/95: support transparent copies */
					if (gdImageGetTransparent (src) == c) {
						tox += stx[x - srcX];
						continue;
					}
					if (src->trueColor) {
						/* Remap to the palette available in the
						   destination image. This is slow and
						   works badly. */
						mapTo = gdImageColorResolveAlpha (dst,
										  gdTrueColorGetRed (c),
										  gdTrueColorGetGreen
										  (c),
										  gdTrueColorGetBlue
										  (c),
										  gdTrueColorGetAlpha
										  (c));
					} else {
						/* Have we established a mapping for this color? */
						if (colorMap[c] == (-1)) {
							/* If it's the same image, mapping is trivial */
							if (dst == src) {
								nc = c;
							} else {
								/* Find or create the best match */
								/* 2.0.5: can't use gdTrueColorGetRed, etc with palette */
								nc = gdImageColorResolveAlpha (dst,
											       gdImageRed (src,
													   c),
											       gdImageGreen
											       (src, c),
											       gdImageBlue (src,
													    c),
											       gdImageAlpha
											       (src, c));
							}
							colorMap[c] = nc;
						}
						mapTo = colorMap[c];
					}
				}
				/* replicate the pixel horizontally stx[] times */
				for (i = 0; (i < stx[x - srcX]); i++) {
					gdImageSetPixel (dst, tox, toy, mapTo);
					tox++;
				}
			}
			toy++;
		}
	}
	gdFree (stx);
	gdFree (sty);
}
/**
 * Function: gdImageCopyRotated
 *
 * Copy a rotated area from an image to another image
 *
 * The area is counter-clockwise rotated using nearest-neighbor interpolation.
 *
 * Parameters:
 *   dst   - The destination image.
 *   src   - The source image.
 *   dstX  - The x-coordinate of the center of the area to copy to.
 *   dstY  - The y-coordinate of the center of the area to copy to.
 *   srcX  - The x-coordinate of the upper left corner to copy from.
 *   srcY  - The y-coordinate of the upper left corner to copy from.
 *   srcW  - The width of the area to copy from.
 *   srcH  - The height of the area to copy from.
 *   angle - The angle in degrees.
 *
 * See also:
 *   - <gdImageRotateInterpolated>
 */
BGD_DECLARE(void) gdImageCopyRotated (gdImagePtr dst,
				      gdImagePtr src,
				      double dstX, double dstY,
				      int srcX, int srcY,
				      int srcWidth, int srcHeight, int angle)
{
	double dx, dy;
	/* radius of the bounding circle of the rotated area */
	double radius = sqrt (srcWidth * srcWidth + srcHeight * srcHeight);
	/* 0.0174532925 ~= pi / 180: degrees -> radians */
	double aCos = cos (angle * .0174532925);
	double aSin = sin (angle * .0174532925);
	/* center of the source area */
	double scX = srcX + ((double) srcWidth) / 2;
	double scY = srcY + ((double) srcHeight) / 2;
	int cmap[gdMaxColors];
	int i;
	/*
	   2.0.34: transparency preservation. The transparentness of
	   the transparent color is more important than its hue.
	 */
	if (src->transparent != -1) {
		if (dst->transparent == -1) {
			dst->transparent = src->transparent;
		}
	}
	/* palette-mapping cache; -1 marks "not yet mapped" */
	for (i = 0; (i < gdMaxColors); i++) {
		cmap[i] = (-1);
	}
	/* scan the destination bounding square and reverse-map each pixel
	   back into source coordinates */
	for (dy = dstY - radius; (dy <= dstY + radius); dy++) {
		for (dx = dstX - radius; (dx <= dstX + radius); dx++) {
			double sxd = (dx - dstX) * aCos - (dy - dstY) * aSin;
			double syd = (dy - dstY) * aCos + (dx - dstX) * aSin;
			int sx = sxd + scX;
			int sy = syd + scY;
			if ((sx >= srcX) && (sx < srcX + srcWidth) &&
			        (sy >= srcY) && (sy < srcY + srcHeight)) {
				int c = gdImageGetPixel (src, sx, sy);
				/* 2.0.34: transparency wins */
				if (c == src->transparent) {
					gdImageSetPixel (dst, dx, dy, dst->transparent);
				} else if (!src->trueColor) {
					/* Use a table to avoid an expensive
					   lookup on every single pixel */
					if (cmap[c] == -1) {
						cmap[c] = gdImageColorResolveAlpha (dst,
										    gdImageRed (src, c),
										    gdImageGreen (src,
												  c),
										    gdImageBlue (src,
												 c),
										    gdImageAlpha (src,
												  c));
					}
					gdImageSetPixel (dst, dx, dy, cmap[c]);
				} else {
					gdImageSetPixel (dst,
							 dx, dy,
							 gdImageColorResolveAlpha (dst,
										   gdImageRed (src,
											       c),
										   gdImageGreen
										   (src, c),
										   gdImageBlue (src,
												c),
										   gdImageAlpha
										   (src, c)));
				}
			}
		}
	}
}
/* When gd 1.x was first created, floating point was to be avoided.
These days it is often faster than table lookups or integer
arithmetic. The routine below is shamelessly, gloriously
floating point. TBB */
/* 2.0.10: cast instead of floor() yields 35% performance improvement.
Thanks to John Buckman. */
/* BUGFIX: parenthesize the macro argument so floor2(a + b) casts the whole
   expression instead of only its first operand (CERT PRE01-C). */
#define floor2(exp) ((long)(exp))
/*#define floor2(exp) floor(exp)*/
/**
 * Function: gdImageCopyResampled
 *
 * Copy a resampled area from an image to another image
 *
 * If the source and destination area differ in size, the area will be resized
 * using bilinear interpolation for truecolor images, and nearest-neighbor
 * interpolation for palette images.
 *
 * Parameters:
 *   dst  - The destination image.
 *   src  - The source image.
 *   dstX - The x-coordinate of the upper left corner to copy to.
 *   dstY - The y-coordinate of the upper left corner to copy to.
 *   srcX - The x-coordinate of the upper left corner to copy from.
 *   srcY - The y-coordinate of the upper left corner to copy from.
 *   dstW - The width of the area to copy to.
 *   dstH - The height of the area to copy to.
 *   srcW - The width of the area to copy from.
 *   srcH - The height of the area to copy from.
 *
 * See also:
 *   - <gdImageCopyResized>
 *   - <gdImageScale>
 */
BGD_DECLARE(void) gdImageCopyResampled (gdImagePtr dst,
					gdImagePtr src,
					int dstX, int dstY,
					int srcX, int srcY,
					int dstW, int dstH, int srcW, int srcH)
{
	int x, y;
	/* palette destinations can't represent averaged colors; fall back to
	   nearest-neighbor */
	if (!dst->trueColor) {
		gdImageCopyResized (dst, src, dstX, dstY, srcX, srcY, dstW, dstH, srcW, srcH);
		return;
	}
	for (y = dstY; (y < dstY + dstH); y++) {
		for (x = dstX; (x < dstX + dstW); x++) {
			float sy1, sy2, sx1, sx2;
			float sx, sy;
			float spixels = 0.0;
			float red = 0.0, green = 0.0, blue = 0.0, alpha = 0.0;
			float alpha_factor, alpha_sum = 0.0, contrib_sum = 0.0;
			/* the source rectangle [sx1,sx2) x [sy1,sy2) that maps onto
			   this destination pixel */
			sy1 = ((float)(y - dstY)) * (float)srcH / (float)dstH;
			sy2 = ((float)(y + 1 - dstY)) * (float) srcH / (float) dstH;
			sy = sy1;
			do {
				float yportion;
				/* fraction of this source row covered by the rectangle */
				if (floorf(sy) == floorf(sy1)) {
					yportion = 1.0 - (sy - floorf(sy));
					if (yportion > sy2 - sy1) {
						yportion = sy2 - sy1;
					}
					sy = floorf(sy);
				} else if (sy == floorf(sy2)) {
					yportion = sy2 - floorf(sy2);
				} else {
					yportion = 1.0;
				}
				sx1 = ((float)(x - dstX)) * (float) srcW / dstW;
				sx2 = ((float)(x + 1 - dstX)) * (float) srcW / dstW;
				sx = sx1;
				do {
					float xportion;
					float pcontribution;
					int p;
					/* fraction of this source column covered */
					if (floorf(sx) == floorf(sx1)) {
						xportion = 1.0 - (sx - floorf(sx));
						if (xportion > sx2 - sx1) {
							xportion = sx2 - sx1;
						}
						sx = floorf(sx);
					} else if (sx == floorf(sx2)) {
						xportion = sx2 - floorf(sx2);
					} else {
						xportion = 1.0;
					}
					pcontribution = xportion * yportion;
					p = gdImageGetTrueColorPixel(src, (int) sx + srcX, (int) sy + srcY);
					/* weight colors by opacity so transparent pixels
					   don't tint the result */
					alpha_factor = ((gdAlphaMax - gdTrueColorGetAlpha(p))) * pcontribution;
					red += gdTrueColorGetRed (p) * alpha_factor;
					green += gdTrueColorGetGreen (p) * alpha_factor;
					blue += gdTrueColorGetBlue (p) * alpha_factor;
					alpha += gdTrueColorGetAlpha (p) * pcontribution;
					alpha_sum += alpha_factor;
					contrib_sum += pcontribution;
					spixels += xportion * yportion;
					sx += 1.0;
				}
				while (sx < sx2);
				sy += 1.0f;
			}
			while (sy < sy2);
			/* normalize the accumulated, weighted channel sums */
			if (spixels != 0.0) {
				red /= spixels;
				green /= spixels;
				blue /= spixels;
				alpha /= spixels;
			}
			if ( alpha_sum != 0.0) {
				if( contrib_sum != 0.0) {
					alpha_sum /= contrib_sum;
				}
				red /= alpha_sum;
				green /= alpha_sum;
				blue /= alpha_sum;
			}
			/* Clamping to allow for rounding errors above */
			if (red > 255.0) {
				red = 255.0;
			}
			if (green > 255.0) {
				green = 255.0;
			}
			if (blue > 255.0f) {
				blue = 255.0;
			}
			if (alpha > gdAlphaMax) {
				alpha = gdAlphaMax;
			}
			gdImageSetPixel(dst, x, y, gdTrueColorAlpha ((int) red, (int) green, (int) blue, (int) alpha));
		}
	}
}
/**
* Group: Polygons
*/
/**
* Function: gdImagePolygon
*
* Draws a closed polygon
*
* Parameters:
* im - The image.
* p - The vertices as array of <gdPoint>s.
* n - The number of vertices.
* c - The color.
*
* See also:
* - <gdImageOpenPolygon>
* - <gdImageFilledPolygon>
*/
BGD_DECLARE(void) gdImagePolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
	if (n <= 0) {
		return;
	}

	/* Close the shape: draw the edge from the last vertex back to the
	 * first, then let gdImageOpenPolygon draw the remaining edges. */
	gdImageLine (im, p[0].x, p[0].y, p[n - 1].x, p[n - 1].y, c);
	gdImageOpenPolygon (im, p, n, c);
}
/**
* Function: gdImageOpenPolygon
*
* Draws an open polygon
*
* Parameters:
* im - The image.
* p - The vertices as array of <gdPoint>s.
* n - The number of vertices.
* c - The color
*
* See also:
* - <gdImagePolygon>
*/
BGD_DECLARE(void) gdImageOpenPolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
	int i;

	if (n <= 0) {
		return;
	}

	/* Connect each pair of consecutive vertices; the polygon is left
	 * open (no edge from the last vertex back to the first). */
	for (i = 1; i < n; i++) {
		gdImageLine (im, p[i - 1].x, p[i - 1].y, p[i].x, p[i].y, c);
	}
}
/* THANKS to Kirsten Schulz for the polygon fixes! */
/* The intersection finding technique of this code could be improved */
/* by remembering the previous intertersection, and by using the slope. */
/* That could help to adjust intersections to produce a nice */
/* interior_extrema. */
/**
* Function: gdImageFilledPolygon
*
* Draws a filled polygon
*
* The polygon is filled using the even-odd fillrule what can leave unfilled
* regions inside of self-intersecting polygons. This behavior might change in
* a future version.
*
* Parameters:
* im - The image.
* p - The vertices as array of <gdPoint>s.
* n - The number of vertices.
* c - The color
*
* See also:
* - <gdImagePolygon>
*/
BGD_DECLARE(void) gdImageFilledPolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
	int i;
	int j;
	int index;
	int y;
	int miny, maxy, pmaxy;
	int x1, y1;
	int x2, y2;
	int ind1, ind2;
	int ints;
	int fill_color;
	if (n <= 0) {
		return;
	}

	/* gdAntiAliased is a sentinel; the actual color is in im->AA_color. */
	if (c == gdAntiAliased) {
		fill_color = im->AA_color;
	} else {
		fill_color = c;
	}

	/* Lazily allocate / grow the per-image scratch buffer that holds the
	 * scanline/edge intersections (at most one per vertex). */
	if (!im->polyAllocated) {
		if (overflow2(sizeof (int), n)) {
			return;
		}
		im->polyInts = (int *) gdMalloc (sizeof (int) * n);
		if (!im->polyInts) {
			return;
		}
		im->polyAllocated = n;
	}
	if (im->polyAllocated < n) {
		/* BUGFIX: compute the new capacity in a local and commit it only
		 * after a successful reallocation.  The old code updated
		 * im->polyAllocated before the overflow check and the realloc, so
		 * a failure left the bookkeeping claiming a buffer that does not
		 * exist: a later call would then skip allocation and either write
		 * through a NULL im->polyInts (gdReallocEx frees the old buffer
		 * on failure) or write past the old, smaller buffer. */
		int nallocated = im->polyAllocated;
		while (nallocated < n) {
			nallocated *= 2;
		}
		if (overflow2(sizeof (int), nallocated)) {
			return;
		}
		im->polyInts = (int *) gdReallocEx (im->polyInts,
		                                    sizeof (int) * nallocated);
		if (!im->polyInts) {
			im->polyAllocated = 0;
			return;
		}
		im->polyAllocated = nallocated;
	}

	/* Vertical extent of the polygon. */
	miny = p[0].y;
	maxy = p[0].y;
	for (i = 1; (i < n); i++) {
		if (p[i].y < miny) {
			miny = p[i].y;
		}
		if (p[i].y > maxy) {
			maxy = p[i].y;
		}
	}
	/* necessary special case: horizontal line */
	if (n > 1 && miny == maxy) {
		x1 = x2 = p[0].x;
		for (i = 1; (i < n); i++) {
			if (p[i].x < x1) {
				x1 = p[i].x;
			} else if (p[i].x > x2) {
				x2 = p[i].x;
			}
		}
		gdImageLine(im, x1, miny, x2, miny, c);
		return;
	}
	pmaxy = maxy;
	/* 2.0.16: Optimization by Ilia Chipitsine -- don't waste time offscreen */
	/* 2.0.26: clipping rectangle is even better */
	if (miny < im->cy1) {
		miny = im->cy1;
	}
	if (maxy > im->cy2) {
		maxy = im->cy2;
	}
	/* Fix in 1.3: count a vertex only once */
	for (y = miny; (y <= maxy); y++) {
		ints = 0;
		for (i = 0; (i < n); i++) {
			if (!i) {
				ind1 = n - 1;
				ind2 = 0;
			} else {
				ind1 = i - 1;
				ind2 = i;
			}
			y1 = p[ind1].y;
			y2 = p[ind2].y;
			if (y1 < y2) {
				x1 = p[ind1].x;
				x2 = p[ind2].x;
			} else if (y1 > y2) {
				y2 = p[ind1].y;
				y1 = p[ind2].y;
				x2 = p[ind1].x;
				x1 = p[ind2].x;
			} else {
				/* Horizontal edges contribute no intersections. */
				continue;
			}
			/* Do the following math as float intermediately, and round to ensure
			 * that Polygon and FilledPolygon for the same set of points have the
			 * same footprint. */
			if ((y >= y1) && (y < y2)) {
				im->polyInts[ints++] = (int) ((float) ((y - y1) * (x2 - x1)) /
				                              (float) (y2 - y1) + 0.5 + x1);
			} else if ((y == pmaxy) && (y == y2)) {
				im->polyInts[ints++] = x2;
			}
		}
		/*
		  2.0.26: polygons pretty much always have less than 100 points,
		  and most of the time they have considerably less. For such trivial
		  cases, insertion sort is a good choice. Also a good choice for
		  future implementations that may wish to indirect through a table.
		*/
		for (i = 1; (i < ints); i++) {
			index = im->polyInts[i];
			j = i;
			while ((j > 0) && (im->polyInts[j - 1] > index)) {
				im->polyInts[j] = im->polyInts[j - 1];
				j--;
			}
			im->polyInts[j] = index;
		}
		/* Even-odd fill: draw between each pair of sorted intersections. */
		for (i = 0; (i < (ints - 1)); i += 2) {
			/* 2.0.29: back to gdImageLine to prevent segfaults when
			   performing a pattern fill */
			gdImageLine (im, im->polyInts[i], y, im->polyInts[i + 1], y,
			             fill_color);
		}
	}
	/* If we are drawing this AA, then redraw the border with AA lines. */
	/* This doesn't work as well as I'd like, but it doesn't clash either. */
	if (c == gdAntiAliased) {
		gdImagePolygon (im, p, n, c);
	}
}
/**
* Group: other
*/
static void gdImageSetAAPixelColor(gdImagePtr im, int x, int y, int color, int t);
/**
* Function: gdImageSetStyle
*
* Sets the style for following drawing operations
*
* Parameters:
* im - The image.
* style - An array of color values.
* noOfPixel - The number of color values.
*/
BGD_DECLARE(void) gdImageSetStyle (gdImagePtr im, int *style, int noOfPixels)
{
	if (im->style) {
		gdFree (im->style);
		/* BUGFIX: clear the stale pointer and length immediately.  The
		 * old code left im->style dangling when the overflow check or
		 * the allocation below failed, leading to a use-after-free in
		 * styled drawing and a double free in gdImageDestroy(). */
		im->style = NULL;
		im->styleLength = 0;
	}
	if (overflow2(sizeof (int), noOfPixels)) {
		return;
	}
	im->style = (int *) gdMalloc (sizeof (int) * noOfPixels);
	if (!im->style) {
		return;
	}
	memcpy (im->style, style, sizeof (int) * noOfPixels);
	im->styleLength = noOfPixels;
	im->stylePos = 0;
}
/**
* Function: gdImageSetThickness
*
* Sets the thickness for following drawing operations
*
* Parameters:
* im - The image.
* thickness - The thickness in pixels.
*/
BGD_DECLARE(void) gdImageSetThickness (gdImagePtr im, int thickness)
{
	/* No validation is performed; the value is consumed by the line
	 * drawing primitives. */
	im->thick = thickness;
}
/**
* Function: gdImageSetBrush
*
* Sets the brush for following drawing operations
*
* Parameters:
* im - The image.
* brush - The brush image.
*/
BGD_DECLARE(void) gdImageSetBrush (gdImagePtr im, gdImagePtr brush)
{
	im->brush = brush;

	/* When both images are palette based, pre-resolve each brush color
	 * into the destination palette so drawing does not have to.  The
	 * loop bound is re-evaluated on purpose: resolving may grow a
	 * palette. */
	if (!im->trueColor && !im->brush->trueColor) {
		int ci;
		for (ci = 0; ci < gdImageColorsTotal (brush); ci++) {
			im->brushColorMap[ci] = gdImageColorResolveAlpha (im,
			                                                  gdImageRed (brush, ci),
			                                                  gdImageGreen (brush, ci),
			                                                  gdImageBlue (brush, ci),
			                                                  gdImageAlpha (brush, ci));
		}
	}
}
/*
Function: gdImageSetTile
*/
BGD_DECLARE(void) gdImageSetTile (gdImagePtr im, gdImagePtr tile)
{
	im->tile = tile;

	/* For palette-to-palette tiling, pre-map every tile color onto the
	 * destination palette.  The bound is re-evaluated each iteration on
	 * purpose: resolving may grow a palette. */
	if (!im->trueColor && !im->tile->trueColor) {
		int ci;
		for (ci = 0; ci < gdImageColorsTotal (tile); ci++) {
			im->tileColorMap[ci] = gdImageColorResolveAlpha (im,
			                                                 gdImageRed (tile, ci),
			                                                 gdImageGreen (tile, ci),
			                                                 gdImageBlue (tile, ci),
			                                                 gdImageAlpha (tile, ci));
		}
	}
}
/**
* Function: gdImageSetAntiAliased
*
* Set the color for subsequent anti-aliased drawing
*
* If <gdAntiAliased> is passed as color to drawing operations that support
* anti-aliased drawing (such as <gdImageLine> and <gdImagePolygon>), the actual
* color to be used can be set with this function.
*
* Example: draw an anti-aliased blue line:
* | gdImageSetAntiAliased(im, gdTrueColorAlpha(0, 0, gdBlueMax, gdAlphaOpaque));
* | gdImageLine(im, 10,10, 20,20, gdAntiAliased);
*
* Parameters:
* im - The image.
* c - The color.
*
* See also:
* - <gdImageSetAntiAliasedDontBlend>
*/
BGD_DECLARE(void) gdImageSetAntiAliased (gdImagePtr im, int c)
{
	im->AA = 1;
	im->AA_color = c;
	/* -1 disables the "don't blend" color (see
	 * gdImageSetAntiAliasedDontBlend). */
	im->AA_dont_blend = -1;
}
/**
* Function: gdImageSetAntiAliasedDontBlend
*
* Set the color and "dont_blend" color for subsequent anti-aliased drawing
*
* This extended variant of <gdImageSetAntiAliased> allows to also specify a
* (background) color that will not be blended in anti-aliased drawing
* operations.
*
* Parameters:
* im - The image.
* c - The color.
* dont_blend - Whether to blend.
*/
BGD_DECLARE(void) gdImageSetAntiAliasedDontBlend (gdImagePtr im, int c, int dont_blend)
{
	im->AA = 1;
	im->AA_color = c;
	/* Pixels already holding this color are skipped when blending AA
	 * edges (consumed by gdImageSetAAPixelColor). */
	im->AA_dont_blend = dont_blend;
}
/**
* Function: gdImageInterlace
*
* Sets whether an image is interlaced
*
* This is relevant only when saving the image in a format that supports
* interlacing.
*
* Parameters:
* im - The image.
* interlaceArg - Whether the image is interlaced.
*
* See also:
* - <gdImageGetInterlaced>
*/
BGD_DECLARE(void) gdImageInterlace (gdImagePtr im, int interlaceArg)
{
	/* Only consulted by encoders for formats that support interlacing. */
	im->interlace = interlaceArg;
}
/**
* Function: gdImageCompare
*
* Compare two images
*
* Parameters:
* im1 - An image.
* im2 - Another image.
*
* Returns:
* A bitmask of <Image Comparison> flags where each set flag signals
* which attributes of the images are different.
*/
BGD_DECLARE(int) gdImageCompare (gdImagePtr im1, gdImagePtr im2)
{
	int x, y;
	int p1, p2;
	int cmpStatus = 0;
	int sx, sy;

	if (im1->interlace != im2->interlace) {
		cmpStatus |= GD_CMP_INTERLACE;
	}
	if (im1->transparent != im2->transparent) {
		cmpStatus |= GD_CMP_TRANSPARENT;
	}
	if (im1->trueColor != im2->trueColor) {
		cmpStatus |= GD_CMP_TRUECOLOR;
	}

	/* Compare only the overlapping area so a size mismatch is reported
	 * without reading out of bounds. */
	sx = im1->sx;
	if (im1->sx != im2->sx) {
		cmpStatus |= GD_CMP_SIZE_X + GD_CMP_IMAGE;
		if (im2->sx < im1->sx) {
			sx = im2->sx;
		}
	}
	sy = im1->sy;
	if (im1->sy != im2->sy) {
		cmpStatus |= GD_CMP_SIZE_Y + GD_CMP_IMAGE;
		if (im2->sy < im1->sy) {
			sy = im2->sy;
		}
	}
	if (im1->colorsTotal != im2->colorsTotal) {
		cmpStatus |= GD_CMP_NUM_COLORS;
	}

	for (y = 0; (y < sy); y++) {
		for (x = 0; (x < sx); x++) {
			p1 =
			    im1->trueColor ? gdImageTrueColorPixel (im1, x,
			            y) :
			    gdImagePalettePixel (im1, x, y);
			p2 =
			    im2->trueColor ? gdImageTrueColorPixel (im2, x,
			            y) :
			    gdImagePalettePixel (im2, x, y);
			/* Channels are compared through the accessor macros, so a
			 * palette image can still match a truecolor one. */
			if (gdImageRed (im1, p1) != gdImageRed (im2, p2)) {
				cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
				break;
			}
			if (gdImageGreen (im1, p1) != gdImageGreen (im2, p2)) {
				cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
				break;
			}
			if (gdImageBlue (im1, p1) != gdImageBlue (im2, p2)) {
				cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
				break;
			}
#if 0
			/* Soon we'll add alpha channel to palettes */
			if (gdImageAlpha (im1, p1) != gdImageAlpha (im2, p2)) {
				cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
				break;
			}
#endif
		}
		/* The first differing pixel ends the scan. */
		if (cmpStatus & GD_CMP_COLOR) {
			break;
		};
	}

	return cmpStatus;
}
/* Thanks to Frank Warmerdam for this superior implementation
of gdAlphaBlend(), which merges alpha in the
destination color much better. */
/**
* Function: gdAlphaBlend
*
* Blend two colors
*
* Parameters:
* dst - The color to blend onto.
* src - The color to blend.
*
* See also:
* - <gdImageAlphaBlending>
* - <gdLayerOverlay>
* - <gdLayerMultiply>
*/
/*
 * Merge src over dst, weighting each channel by both alpha values.
 * Remember gd's convention: alpha 0 is opaque, gdAlphaMax (127) is
 * fully transparent.
 */
BGD_DECLARE(int) gdAlphaBlend (int dst, int src)
{
	const int a_src = gdTrueColorGetAlpha(src);
	int a_dst, w_src, w_dst, w_tot;
	int out_a, out_r, out_g, out_b;

	/* Fast paths: a fully opaque source wins outright... */
	if (a_src == gdAlphaOpaque)
		return src;

	a_dst = gdTrueColorGetAlpha(dst);

	/* ...a fully transparent source leaves dst untouched, and a fully
	 * transparent destination takes src unchanged. */
	if (a_src == gdAlphaTransparent)
		return dst;
	if (a_dst == gdAlphaTransparent)
		return src;

	/* The source weight grows with its opacity; the destination weight
	 * shrinks as the overlay becomes more opaque. */
	w_src = gdAlphaTransparent - a_src;
	w_dst = (gdAlphaTransparent - a_dst) * a_src / gdAlphaMax;
	w_tot = w_src + w_dst;

	out_a = a_src * a_dst / gdAlphaMax;
	out_r = (gdTrueColorGetRed(src) * w_src
	         + gdTrueColorGetRed(dst) * w_dst) / w_tot;
	out_g = (gdTrueColorGetGreen(src) * w_src
	         + gdTrueColorGetGreen(dst) * w_dst) / w_tot;
	out_b = (gdTrueColorGetBlue(src) * w_src
	         + gdTrueColorGetBlue(dst) * w_dst) / w_tot;

	return (out_a << 24) + (out_r << 16) + (out_g << 8) + out_b;
}
static int gdAlphaOverlayColor (int src, int dst, int max );
/**
* Function: gdLayerOverlay
*
* Overlay two colors
*
* Parameters:
* dst - The color to overlay onto.
* src - The color to overlay.
*
* See also:
* - <gdImageAlphaBlending>
* - <gdAlphaBlend>
* - <gdLayerMultiply>
*/
BGD_DECLARE(int) gdLayerOverlay (int dst, int src)
{
	/* Opacities (gd stores alpha inverted: 0 = opaque). */
	int op_dst = gdAlphaMax - gdTrueColorGetAlpha(dst);
	int op_src = gdAlphaMax - gdTrueColorGetAlpha(src);
	int out_a = gdAlphaMax - op_dst * op_src / gdAlphaMax;
	int out_r = gdAlphaOverlayColor( gdTrueColorGetRed(src), gdTrueColorGetRed(dst), gdRedMax );
	int out_g = gdAlphaOverlayColor( gdTrueColorGetGreen(src), gdTrueColorGetGreen(dst), gdGreenMax );
	int out_b = gdAlphaOverlayColor( gdTrueColorGetBlue(src), gdTrueColorGetBlue(dst), gdBlueMax );

	return (out_a << 24) + (out_r << 16) + (out_g << 8) + out_b;
}
/* Apply 'overlay' effect - background pixels are colourised by the foreground colour */
/* Apply the 'overlay' effect to one channel: background values below the
 * midpoint multiply (darken), values above it screen (lighten). */
static int gdAlphaOverlayColor (int src, int dst, int max )
{
	int twice = dst << 1;

	if (twice <= max) {
		/* "dark" half: plain multiply */
		return twice * src / max;
	}
	/* "light" half: screen-like blend */
	return twice + (src << 1) - (twice * src / max) - max;
}
/**
* Function: gdLayerMultiply
*
* Overlay two colors with multiply effect
*
* Parameters:
* dst - The color to overlay onto.
* src - The color to overlay.
*
* See also:
* - <gdImageAlphaBlending>
* - <gdAlphaBlend>
* - <gdLayerOverlay>
*/
BGD_DECLARE(int) gdLayerMultiply (int dst, int src)
{
	/* Opacities (gd stores alpha inverted: 0 = opaque). */
	int op_src = gdAlphaMax - gdTrueColorGetAlpha(src);
	int op_dst = gdAlphaMax - gdTrueColorGetAlpha(dst);
	int r1, r2, g1, g2, b1, b2, a1, a2;

	/* Push each channel toward white in proportion to its layer's
	 * transparency before multiplying. */
	r1 = gdRedMax - (op_src * (gdRedMax - gdTrueColorGetRed(src))) / gdAlphaMax;
	r2 = gdRedMax - (op_dst * (gdRedMax - gdTrueColorGetRed(dst))) / gdAlphaMax;
	g1 = gdGreenMax - (op_src * (gdGreenMax - gdTrueColorGetGreen(src))) / gdAlphaMax;
	g2 = gdGreenMax - (op_dst * (gdGreenMax - gdTrueColorGetGreen(dst))) / gdAlphaMax;
	b1 = gdBlueMax - (op_src * (gdBlueMax - gdTrueColorGetBlue(src))) / gdAlphaMax;
	b2 = gdBlueMax - (op_dst * (gdBlueMax - gdTrueColorGetBlue(dst))) / gdAlphaMax;

	/* Back to gd's inverted alpha convention for the result. */
	a1 = gdAlphaMax - op_src;
	a2 = gdAlphaMax - op_dst;

	return (((a1 * a2 / gdAlphaMax) << 24) +
	        ((r1 * r2 / gdRedMax) << 16) +
	        ((g1 * g2 / gdGreenMax) << 8) +
	        ((b1 * b2 / gdBlueMax)));
}
/**
* Function: gdImageAlphaBlending
*
* Set the effect for subsequent drawing operations
*
* Note that the effect is used for truecolor images only.
*
* Parameters:
* im - The image.
* alphaBlendingArg - The effect.
*
* See also:
* - <Effects>
*/
BGD_DECLARE(void) gdImageAlphaBlending (gdImagePtr im, int alphaBlendingArg)
{
	/* Only consulted for truecolor images (see the doc comment above). */
	im->alphaBlendingFlag = alphaBlendingArg;
}
/**
* Function: gdImageSaveAlpha
*
* Sets the save alpha flag
*
* The save alpha flag specifies whether the alpha channel of the pixels should
* be saved. This is supported only for image formats that support full alpha
* transparency, e.g. PNG.
*/
BGD_DECLARE(void) gdImageSaveAlpha (gdImagePtr im, int saveAlphaArg)
{
	/* Consumed by encoders of formats with full alpha support (e.g. PNG). */
	im->saveAlphaFlag = saveAlphaArg;
}
/**
* Function: gdImageSetClip
*
* Sets the clipping rectangle
*
* The clipping rectangle restricts the drawing area for following drawing
* operations.
*
* Parameters:
* im - The image.
* x1 - The x-coordinate of the upper left corner.
* y1 - The y-coordinate of the upper left corner.
* x2 - The x-coordinate of the lower right corner.
* y2 - The y-coordinate of the lower right corner.
*
* See also:
* - <gdImageGetClip>
*/
BGD_DECLARE(void) gdImageSetClip (gdImagePtr im, int x1, int y1, int x2, int y2)
{
	const int xmax = im->sx - 1;
	const int ymax = im->sy - 1;

	/* Clamp every coordinate into [0, size-1].  The lower and upper
	 * bounds are applied independently (not else-if), matching the
	 * historical behaviour for degenerate image sizes. */
	if (x1 < 0)
		x1 = 0;
	if (x1 > xmax)
		x1 = xmax;
	if (x2 < 0)
		x2 = 0;
	if (x2 > xmax)
		x2 = xmax;
	if (y1 < 0)
		y1 = 0;
	if (y1 > ymax)
		y1 = ymax;
	if (y2 < 0)
		y2 = 0;
	if (y2 > ymax)
		y2 = ymax;

	im->cx1 = x1;
	im->cy1 = y1;
	im->cx2 = x2;
	im->cy2 = y2;
}
/**
* Function: gdImageGetClip
*
* Gets the current clipping rectangle
*
* Parameters:
* im - The image.
* x1P - (out) The x-coordinate of the upper left corner.
* y1P - (out) The y-coordinate of the upper left corner.
* x2P - (out) The x-coordinate of the lower right corner.
* y2P - (out) The y-coordinate of the lower right corner.
*
* See also:
* - <gdImageSetClip>
*/
BGD_DECLARE(void) gdImageGetClip (gdImagePtr im, int *x1P, int *y1P, int *x2P, int *y2P)
{
	/* Straight copy-out of the stored clip rectangle; no validation. */
	*x1P = im->cx1;
	*y1P = im->cy1;
	*x2P = im->cx2;
	*y2P = im->cy2;
}
/**
* Function: gdImageSetResolution
*
* Sets the resolution of an image.
*
* Parameters:
* im - The image.
* res_x - The horizontal resolution in DPI.
* res_y - The vertical resolution in DPI.
*
* See also:
* - <gdImageResolutionX>
* - <gdImageResolutionY>
*/
BGD_DECLARE(void) gdImageSetResolution(gdImagePtr im, const unsigned int res_x, const unsigned int res_y)
{
	/* A value of 0 leaves the corresponding resolution unchanged. */
	if (res_x > 0) im->res_x = res_x;
	if (res_y > 0) im->res_y = res_y;
}
/*
* Added on 2003/12 by Pierre-Alain Joye (pajoye@pearfr.org)
* */
/* Linear blend of channel cc toward c by weight a (0..255): a == 0 keeps
 * cc, a == 255 yields (approximately) c.  The extra shifted term rounds
 * the 8-bit fixed-point division.  Beware: arguments are evaluated more
 * than once. */
#define BLEND_COLOR(a, nc, c, cc) \
nc = (cc) + (((((c) - (cc)) * (a)) + ((((c) - (cc)) * (a)) >> 8) + 0x80) >> 8);

/* Write one anti-aliased pixel: blend 'color' over whatever is already
 * at (x, y), where t is the weight of the existing pixel (0 = pure
 * 'color', 255 = unchanged).  Truecolor images only — writes tpixels. */
static void gdImageSetAAPixelColor(gdImagePtr im, int x, int y, int color, int t)
{
	int dr,dg,db,p,r,g,b;

	/* 2.0.34: watch out for out of range calls */
	if (!gdImageBoundsSafeMacro(im, x, y)) {
		return;
	}
	p = gdImageGetPixel(im,x,y);

	/* TBB: we have to implement the dont_blend stuff to provide
	   the full feature set of the old implementation */
	if ((p == color)
	        || ((p == im->AA_dont_blend)
	            && (t != 0x00))) {
		return;
	}
	dr = gdTrueColorGetRed(color);
	dg = gdTrueColorGetGreen(color);
	db = gdTrueColorGetBlue(color);

	r = gdTrueColorGetRed(p);
	g = gdTrueColorGetGreen(p);
	b = gdTrueColorGetBlue(p);

	BLEND_COLOR(t, dr, r, dr);
	BLEND_COLOR(t, dg, g, dg);
	BLEND_COLOR(t, db, b, db);
	im->tpixels[y][x] = gdTrueColorAlpha(dr, dg, db, gdAlphaOpaque);
}
/* Draw an anti-aliased line using 16.16 fixed-point stepping along the
 * major axis, blending two vertically (or horizontally) adjacent pixels
 * per step with complementary coverage weights. */
static void gdImageAALine (gdImagePtr im, int x1, int y1, int x2, int y2, int col)
{
	/* keep them as 32bits */
	long x, y, inc, frac;
	long dx, dy,tmp;
	int w, wid, wstart;
	int thick = im->thick;

	if (!im->trueColor) {
		/* TBB: don't crash when the image is of the wrong type */
		gdImageLine(im, x1, y1, x2, y2, col);
		return;
	}

	/* TBB: use the clipping rectangle */
	if (clip_1d (&x1, &y1, &x2, &y2, im->cx1, im->cx2) == 0)
		return;
	if (clip_1d (&y1, &x1, &y2, &x2, im->cy1, im->cy2) == 0)
		return;

	dx = x2 - x1;
	dy = y2 - y1;

	if (dx == 0 && dy == 0) {
		/* TBB: allow setting points */
		gdImageSetPixel(im, x1, y1, col);
		return;
	} else {
		double ag;
		/* Widen the stroke so a 'thick'-pixel pen measured
		 * perpendicular to the line is honoured. */
		/* Cast the long to an int to avoid compiler warnings about truncation.
		 * This isn't a problem as computed dy/dx values came from ints above. */
		ag = fabs(abs((int)dy) < abs((int)dx) ? cos(atan2(dy, dx)) : sin(atan2(dy, dx)));
		if (ag != 0) {
			wid = thick / ag;
		} else {
			wid = 1;
		}
		if (wid == 0) {
			wid = 1;
		}
	}

	/* Axis aligned lines */
	if (dx == 0) {
		gdImageVLine(im, x1, y1, y2, col);
		return;
	} else if (dy == 0) {
		gdImageHLine(im, y1, x1, x2, col);
		return;
	}

	if (abs((int)dx) > abs((int)dy)) {
		/* Mostly horizontal: walk x, accumulate the fractional y in
		 * 'frac' (16.16 fixed point). */
		if (dx < 0) {
			tmp = x1;
			x1 = x2;
			x2 = tmp;
			tmp = y1;
			y1 = y2;
			y2 = tmp;
			dx = x2 - x1;
			dy = y2 - y1;
		}
		y = y1;
		inc = (dy * 65536) / dx;
		frac = 0;
		/* TBB: set the last pixel for consistency (<=) */
		for (x = x1 ; x <= x2 ; x++) {
			wstart = y - wid / 2;
			for (w = wstart; w < wstart + wid; w++) {
				/* Split the coverage between the two rows the ideal
				 * line passes through. */
				gdImageSetAAPixelColor(im, x , w , col , (frac >> 8) & 0xFF);
				gdImageSetAAPixelColor(im, x , w + 1 , col, (~frac >> 8) & 0xFF);
			}
			frac += inc;
			if (frac >= 65536) {
				frac -= 65536;
				y++;
			} else if (frac < 0) {
				frac += 65536;
				y--;
			}
		}
	} else {
		/* Mostly vertical: walk y, accumulate the fractional x. */
		if (dy < 0) {
			tmp = x1;
			x1 = x2;
			x2 = tmp;
			tmp = y1;
			y1 = y2;
			y2 = tmp;
			dx = x2 - x1;
			dy = y2 - y1;
		}
		x = x1;
		inc = (dx * 65536) / dy;
		frac = 0;
		/* TBB: set the last pixel for consistency (<=) */
		for (y = y1 ; y <= y2 ; y++) {
			wstart = x - wid / 2;
			for (w = wstart; w < wstart + wid; w++) {
				gdImageSetAAPixelColor(im, w , y , col, (frac >> 8) & 0xFF);
				gdImageSetAAPixelColor(im, w + 1, y, col, (~frac >> 8) & 0xFF);
			}
			frac += inc;
			if (frac >= 65536) {
				frac -= 65536;
				x++;
			} else if (frac < 0) {
				frac += 65536;
				x--;
			}
		}
	}
}
/**
* Function: gdImagePaletteToTrueColor
*
* Convert a palette image to true color
*
* Parameters:
* src - The image.
*
* Returns:
* Non-zero if the conversion succeeded, zero otherwise.
*
* See also:
* - <gdImageTrueColorToPalette>
*/
BGD_DECLARE(int) gdImagePaletteToTrueColor(gdImagePtr src)
{
	unsigned int y;
	unsigned int yy;

	if (src == NULL) {
		return 0;
	}
	/* Already truecolor: nothing to do. */
	if (src->trueColor == 1) {
		return 1;
	} else {
		unsigned int x;
		const unsigned int sy = gdImageSY(src);
		const unsigned int sx = gdImageSX(src);

		src->tpixels = (int **) gdMalloc(sizeof(int *) * sy);
		if (src->tpixels == NULL) {
			return 0;
		}

		for (y = 0; y < sy; y++) {
			const unsigned char *src_row = src->pixels[y];
			int * dst_row;

			/* no need to calloc it, we overwrite all pxl anyway */
			src->tpixels[y] = (int *) gdMalloc(sx * sizeof(int));
			if (src->tpixels[y] == NULL) {
				goto clean_on_error;
			}

			dst_row = src->tpixels[y];
			for (x = 0; x < sx; x++) {
				const unsigned char c = *(src_row + x);
				if (c == src->transparent) {
					/* Transparent palette entry becomes a fully
					 * transparent truecolor pixel. */
					*(dst_row + x) = gdTrueColorAlpha(0, 0, 0, 127);
				} else {
					*(dst_row + x) = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]);
				}
			}
		}
	}

	/* free old palette buffer (y is sy) */
	for (yy = 0; yy < y; yy++) {
		gdFree(src->pixels[yy]);
	}
	gdFree(src->pixels);

	src->trueColor = 1;
	src->pixels = NULL;
	src->alphaBlendingFlag = 0;
	src->saveAlphaFlag = 1;

	/* Re-express the transparent palette index as a truecolor value. */
	if (src->transparent >= 0) {
		const unsigned char c = src->transparent;
		src->transparent = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]);
	}

	return 1;

clean_on_error:
	/* free new true color buffer (y is not allocated, have failed) */
	for (yy = 0; yy < y; yy++) {
		gdFree(src->tpixels[yy]);
	}
	gdFree(src->tpixels);
	/* BUGFIX: clear the pointer so the image is left in a consistent
	 * palette state.  The old code left src->tpixels dangling, and a
	 * later gdImageDestroy() would free it a second time. */
	src->tpixels = NULL;
	return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_272_0 |
crossvul-cpp_data_bad_1847_0 | /*
RFCOMM implementation for Linux Bluetooth stack (BlueZ).
Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com>
Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
/*
* RFCOMM sockets.
*/
#include <linux/export.h>
#include <linux/debugfs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/rfcomm.h>
/* Socket-layer operations table; static, so defined later in this file. */
static const struct proto_ops rfcomm_sock_ops;

/* Global list of all RFCOMM sockets, protected by its embedded rwlock. */
static struct bt_sock_list rfcomm_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock)
};

static void rfcomm_sock_close(struct sock *sk);
static void rfcomm_sock_kill(struct sock *sk);
/* ---- DLC callbacks ----
*
* called under rfcomm_dlc_lock()
*/
/* DLC data callback (runs under rfcomm_dlc_lock()): queue the incoming
 * skb on the owning socket and throttle the DLC once the socket's
 * receive allocation reaches its limit. */
static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb)
{
	struct sock *sk = d->owner;
	if (!sk)
		return;

	/* Charge the skb's bytes against the socket's receive allocation
	 * manually, then wake any reader. */
	atomic_add(skb->len, &sk->sk_rmem_alloc);
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk);

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		rfcomm_dlc_throttle(d);
}
/* DLC state callback (runs under rfcomm_dlc_lock()): mirror the DLC
 * state onto the owning socket and wake whoever is waiting on it. */
static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err)
{
	struct sock *sk = d->owner, *parent;
	unsigned long flags;

	if (!sk)
		return;

	BT_DBG("dlc %p state %ld err %d", d, d->state, err);

	local_irq_save(flags);
	bh_lock_sock(sk);

	if (err)
		sk->sk_err = err;

	sk->sk_state = d->state;

	parent = bt_sk(sk)->parent;
	if (parent) {
		/* Socket is still on a listener's accept queue: drop it if
		 * the DLC closed, then notify the listener. */
		if (d->state == BT_CLOSED) {
			sock_set_flag(sk, SOCK_ZAPPED);
			bt_accept_unlink(sk);
		}
		parent->sk_data_ready(parent);
	} else {
		if (d->state == BT_CONNECTED)
			rfcomm_session_getaddr(d->session,
					       &rfcomm_pi(sk)->src, NULL);
		sk->sk_state_change(sk);
	}

	bh_unlock_sock(sk);
	local_irq_restore(flags);

	if (parent && sock_flag(sk, SOCK_ZAPPED)) {
		/* We have to drop DLC lock here, otherwise
		 * rfcomm_sock_destruct() will dead lock. */
		rfcomm_dlc_unlock(d);
		rfcomm_sock_kill(sk);
		rfcomm_dlc_lock(d);
	}
}
/* ---- Socket functions ---- */
/* Find a bound or listening RFCOMM socket matching exactly the given
 * server channel and source address.  Caller must hold
 * rfcomm_sk_list.lock.  Returns NULL if there is no match. */
static struct sock *__rfcomm_get_listen_sock_by_addr(u8 channel, bdaddr_t *src)
{
	struct sock *sk = NULL;

	sk_for_each(sk, &rfcomm_sk_list.head) {
		if (rfcomm_pi(sk)->channel != channel)
			continue;

		if (bacmp(&rfcomm_pi(sk)->src, src))
			continue;

		if (sk->sk_state == BT_BOUND || sk->sk_state == BT_LISTEN)
			break;
	}

	/* sk_for_each() leaves sk == NULL when the list is exhausted, so
	 * the previous "sk ? sk : NULL" was redundant. */
	return sk;
}
/* Find socket with channel and source bdaddr.
* Returns closest match.
*/
static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;

	read_lock(&rfcomm_sk_list.lock);

	sk_for_each(sk, &rfcomm_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (rfcomm_pi(sk)->channel == channel) {
			/* Exact match. */
			if (!bacmp(&rfcomm_pi(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&rfcomm_sk_list.lock);

	/* Prefer the exact address match; fall back to a wildcard bind. */
	return sk ? sk : sk1;
}
/* sk_destruct callback, invoked when the last socket reference is
 * dropped: purge queued skbs and detach/release the attached DLC. */
static void rfcomm_sock_destruct(struct sock *sk)
{
	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;

	BT_DBG("sk %p dlc %p", sk, d);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	rfcomm_dlc_lock(d);
	rfcomm_pi(sk)->dlc = NULL;

	/* Detach DLC if it's owned by this socket */
	if (d->owner == sk)
		d->owner = NULL;
	rfcomm_dlc_unlock(d);

	/* Drop the reference taken when the DLC was attached. */
	rfcomm_dlc_put(d);
}
/* Tear down a listening socket: close and kill every connection still
 * sitting on its accept queue, then mark the listener closed. */
static void rfcomm_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted dlcs */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		rfcomm_sock_close(sk);
		rfcomm_sock_kill(sk);
	}

	parent->sk_state  = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
/* Kill socket (only if zapped and orphan)
* Must be called on unlocked socket.
*/
static void rfcomm_sock_kill(struct sock *sk)
{
	/* Only zapped, orphaned sockets may be destroyed. */
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));

	/* Kill poor orphan */
	bt_sock_unlink(&rfcomm_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	/* Drop the list's reference; sk_destruct runs once refcnt hits 0. */
	sock_put(sk);
}
/* State-dependent close.  Caller holds the socket lock (see
 * rfcomm_sock_close()).  Every path ends with the socket SOCK_ZAPPED. */
static void __rfcomm_sock_close(struct sock *sk)
{
	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;

	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		rfcomm_sock_cleanup_listen(sk);
		break;

	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
	case BT_CONNECTED:
		rfcomm_dlc_close(d, 0);
		/* fall through: still mark the socket zapped below */

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
/* Close socket.
* Must be called on unlocked socket.
*/
/* Locked wrapper around __rfcomm_sock_close(); must be called on an
 * unlocked socket. */
static void rfcomm_sock_close(struct sock *sk)
{
	lock_sock(sk);
	__rfcomm_sock_close(sk);
	release_sock(sk);
}
/* Initialise a freshly allocated RFCOMM socket.  With a parent (i.e. an
 * accepted connection) the security level, role switch and deferred
 * setup settings are inherited from the listening socket. */
static void rfcomm_sock_init(struct sock *sk, struct sock *parent)
{
	struct rfcomm_pinfo *pi = rfcomm_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP,
						&bt_sk(parent)->flags);

		pi->sec_level = rfcomm_pi(parent)->sec_level;
		pi->role_switch = rfcomm_pi(parent)->role_switch;

		security_sk_clone(parent, sk);
	} else {
		pi->dlc->defer_setup = 0;

		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
	}

	/* Propagate the effective settings down to the DLC. */
	pi->dlc->sec_level = pi->sec_level;
	pi->dlc->role_switch = pi->role_switch;
}
/* Protocol descriptor: obj_size makes sk_alloc() reserve room for the
 * RFCOMM-specific rfcomm_pinfo at the end of each struct sock. */
static struct proto rfcomm_proto = {
	.name		= "RFCOMM",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct rfcomm_pinfo)
};
/* Allocate an RFCOMM socket together with its DLC, wire up the DLC
 * callbacks, and link the socket into the global list.  Returns NULL on
 * allocation failure. */
static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern)
{
	struct rfcomm_dlc *d;
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	d = rfcomm_dlc_alloc(prio);
	if (!d) {
		sk_free(sk);
		return NULL;
	}

	d->data_ready   = rfcomm_sk_data_ready;
	d->state_change = rfcomm_sk_state_change;

	rfcomm_pi(sk)->dlc = d;
	d->owner = sk;

	sk->sk_destruct = rfcomm_sock_destruct;
	sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT;

	/* Size buffers for a full credit window of default-MTU frames. */
	sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;
	sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state    = BT_OPEN;

	bt_sock_link(&rfcomm_sk_list, sk);

	BT_DBG("sk %p", sk);
	return sk;
}
/* socket(2) handler for the RFCOMM protocol family member: only
 * SOCK_STREAM and SOCK_RAW types are supported. */
static int rfcomm_sock_create(struct net *net, struct socket *sock,
			      int protocol, int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rfcomm_sock_ops;

	sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern);
	if (!sk)
		return -ENOMEM;

	rfcomm_sock_init(sk, NULL);
	return 0;
}
static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
struct sock *sk = sock->sk;
int chan = sa->rc_channel;
int err = 0;
BT_DBG("sk %p %pMR", sk, &sa->rc_bdaddr);
if (!addr || addr->sa_family != AF_BLUETOOTH)
return -EINVAL;
lock_sock(sk);
if (sk->sk_state != BT_OPEN) {
err = -EBADFD;
goto done;
}
if (sk->sk_type != SOCK_STREAM) {
err = -EINVAL;
goto done;
}
write_lock(&rfcomm_sk_list.lock);
if (chan && __rfcomm_get_listen_sock_by_addr(chan, &sa->rc_bdaddr)) {
err = -EADDRINUSE;
} else {
/* Save source address */
bacpy(&rfcomm_pi(sk)->src, &sa->rc_bdaddr);
rfcomm_pi(sk)->channel = chan;
sk->sk_state = BT_BOUND;
}
write_unlock(&rfcomm_sk_list.lock);
done:
release_sock(sk);
return err;
}
/* connect(2) handler: open the DLC towards the remote device/channel and
 * (for blocking sockets) wait until the connection is established. */
static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
	struct sock *sk = sock->sk;
	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
	int err = 0;

	BT_DBG("sk %p", sk);

	/* Address length and family are validated before any field of *sa
	 * is dereferenced. */
	if (alen < sizeof(struct sockaddr_rc) ||
	    addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	if (sk->sk_type != SOCK_STREAM) {
		err = -EINVAL;
		goto done;
	}

	sk->sk_state = BT_CONNECT;
	bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr);
	rfcomm_pi(sk)->channel = sa->rc_channel;

	d->sec_level = rfcomm_pi(sk)->sec_level;
	d->role_switch = rfcomm_pi(sk)->role_switch;

	err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
			      sa->rc_channel);
	if (!err)
		/* O_NONBLOCK makes the timeout zero, so this returns at once
		 * for non-blocking sockets. */
		err = bt_sock_wait_state(sk, BT_CONNECTED,
					 sock_sndtimeo(sk, flags & O_NONBLOCK));

done:
	release_sock(sk);
	return err;
}
/* listen(2) handler.  If the socket was bound without a channel, the
 * first free server channel (1..30) on its source address is claimed. */
static int rfcomm_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if (sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	if (sk->sk_type != SOCK_STREAM) {
		err = -EINVAL;
		goto done;
	}

	if (!rfcomm_pi(sk)->channel) {
		bdaddr_t *src = &rfcomm_pi(sk)->src;
		u8 channel;

		err = -EINVAL;

		write_lock(&rfcomm_sk_list.lock);

		for (channel = 1; channel < 31; channel++)
			if (!__rfcomm_get_listen_sock_by_addr(channel, src)) {
				rfcomm_pi(sk)->channel = channel;
				err = 0;
				break;
			}

		write_unlock(&rfcomm_sk_list.lock);

		/* All 30 channels in use: err is still -EINVAL here. */
		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
/*
 * Accept an incoming connection on a listening RFCOMM socket.
 * Blocks (up to the socket receive timeout) until a connection is
 * queued by rfcomm_connect_ind(), a signal arrives, or the socket
 * leaves BT_LISTEN.  The socket lock is dropped while sleeping.
 */
static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	/* Nested class: the new child sock is locked elsewhere */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_type != SOCK_STREAM) {
		err = -EINVAL;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (1) {
		/* State may change while we slept without the lock */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		nsk = bt_accept_dequeue(sk, newsock);
		if (nsk)
			break;

		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);

		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);

		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	}
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
/*
 * Report the local (peer == 0) or remote (peer != 0) address/channel
 * of an RFCOMM socket.  Peer queries require a connection in
 * progress or established; otherwise -ENOTCONN.
 */
static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
{
	struct sockaddr_rc *sa = (struct sockaddr_rc *) addr;
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (peer && sk->sk_state != BT_CONNECTED &&
	    sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2)
		return -ENOTCONN;

	memset(sa, 0, sizeof(*sa));
	sa->rc_family = AF_BLUETOOTH;
	sa->rc_channel = rfcomm_pi(sk)->channel;

	/* Remote address for peer queries, our own otherwise */
	bacpy(&sa->rc_bdaddr,
	      peer ? &rfcomm_pi(sk)->dst : &rfcomm_pi(sk)->src);

	*len = sizeof(struct sockaddr_rc);
	return 0;
}
/*
 * Send data on a connected RFCOMM socket.
 * The payload is chopped into skbs of at most the DLC MTU and queued
 * via rfcomm_dlc_send().  Returns the number of bytes queued, or a
 * negative error if nothing was sent before the failure.
 */
static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
	struct sk_buff *skb;
	int sent;

	/* A deferred-setup connection has not been accepted yet */
	if (test_bit(RFCOMM_DEFER_SETUP, &d->flags))
		return -ENOTCONN;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		return -EPIPE;

	BT_DBG("sock %p, sk %p", sock, sk);

	lock_sock(sk);

	sent = bt_sock_wait_ready(sk, msg->msg_flags);
	if (sent)
		goto done;

	while (len) {
		size_t size = min_t(size_t, len, d->mtu);
		int err;

		/* Reserve headroom for the RFCOMM/L2CAP/HCI headers */
		skb = sock_alloc_send_skb(sk, size + RFCOMM_SKB_RESERVE,
				msg->msg_flags & MSG_DONTWAIT, &err);
		if (!skb) {
			if (sent == 0)
				sent = err;
			break;
		}
		skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE);

		err = memcpy_from_msg(skb_put(skb, size), msg, size);
		if (err) {
			kfree_skb(skb);
			if (sent == 0)
				sent = err;
			break;
		}

		skb->priority = sk->sk_priority;

		err = rfcomm_dlc_send(d, skb);
		if (err < 0) {
			kfree_skb(skb);
			/* Report the error only if nothing went out yet */
			if (sent == 0)
				sent = err;
			break;
		}

		sent += size;
		len  -= size;
	}

done:
	release_sock(sk);
	return sent;
}
/*
 * Receive data from an RFCOMM socket.
 * The first recvmsg() on a deferred-setup socket accepts the pending
 * DLC instead of reading data and returns 0.  Receive-buffer
 * accounting is adjusted and the DLC unthrottled once the buffer
 * drains below a quarter of rcvbuf.
 */
static int rfcomm_sock_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc;
	int len;

	if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {
		rfcomm_dlc_accept(d);
		return 0;
	}

	len = bt_sock_stream_recvmsg(sock, msg, size, flags);

	lock_sock(sk);
	/* MSG_PEEK leaves the data (and its accounting) in place */
	if (!(flags & MSG_PEEK) && len > 0)
		atomic_sub(len, &sk->sk_rmem_alloc);

	if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2))
		rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc);
	release_sock(sk);

	return len;
}
/*
 * Legacy SOL_RFCOMM setsockopt handler.
 * Maps the RFCOMM_LM link-mode bitmask onto the socket's security
 * level and role-switch setting.  RFCOMM_LM_FIPS cannot be set this
 * way and is rejected.
 */
static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case RFCOMM_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		if (opt & RFCOMM_LM_FIPS) {
			err = -EINVAL;
			break;
		}

		/* Highest requested mode wins (checked low to high) */
		if (opt & RFCOMM_LM_AUTH)
			rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & RFCOMM_LM_ENCRYPT)
			rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & RFCOMM_LM_SECURE)
			rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH;

		rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/*
 * setsockopt handler.  SOL_RFCOMM is forwarded to the legacy handler;
 * SOL_BLUETOOTH supports BT_SECURITY (stream sockets only) and
 * BT_DEFER_SETUP (bound/listening sockets only).
 */
static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int err = 0;
	size_t len;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_RFCOMM)
		return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_STREAM) {
			err = -EINVAL;
			break;
		}

		sec.level = BT_SECURITY_LOW;

		/* Accept a possibly-short struct from older userspace */
		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		rfcomm_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
		else
			clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/*
 * Legacy SOL_RFCOMM getsockopt handler.
 * RFCOMM_LM reconstructs the link-mode bitmask from the security
 * level; RFCOMM_CONNINFO reports the HCI handle and remote device
 * class of the underlying connection.
 */
static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct sock *l2cap_sk;
	struct l2cap_conn *conn;
	struct rfcomm_conninfo cinfo;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case RFCOMM_LM:
		switch (rfcomm_pi(sk)->sec_level) {
		case BT_SECURITY_LOW:
			opt = RFCOMM_LM_AUTH;
			break;
		case BT_SECURITY_MEDIUM:
			opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT;
			break;
		case BT_SECURITY_HIGH:
			opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT |
			      RFCOMM_LM_SECURE;
			break;
		case BT_SECURITY_FIPS:
			opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT |
			      RFCOMM_LM_SECURE | RFCOMM_LM_FIPS;
			break;
		default:
			opt = 0;
			break;
		}

		if (rfcomm_pi(sk)->role_switch)
			opt |= RFCOMM_LM_MASTER;

		if (put_user(opt, (u32 __user *) optval))
			err = -EFAULT;

		break;

	case RFCOMM_CONNINFO:
		if (sk->sk_state != BT_CONNECTED &&
		    !rfcomm_pi(sk)->dlc->defer_setup) {
			err = -ENOTCONN;
			break;
		}

		/* NOTE(review): assumes dlc->session (and its sock) is
		 * non-NULL whenever the state check above passes -
		 * TODO confirm against the DLC lifecycle.
		 */
		l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
		conn = l2cap_pi(l2cap_sk)->chan->conn;

		memset(&cinfo, 0, sizeof(cinfo));
		cinfo.hci_handle = conn->hcon->handle;
		memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/*
 * getsockopt handler.  SOL_RFCOMM goes to the legacy handler;
 * SOL_BLUETOOTH supports BT_SECURITY and BT_DEFER_SETUP, mirroring
 * rfcomm_sock_setsockopt().
 */
static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_RFCOMM)
		return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_STREAM) {
			err = -EINVAL;
			break;
		}

		sec.level = rfcomm_pi(sk)->sec_level;
		sec.key_size = 0;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags),
			     (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
/*
 * ioctl handler: try the generic Bluetooth socket ioctls first, then
 * fall through to the RFCOMM TTY device ioctls when that subsystem is
 * compiled in.
 */
static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	/* __maybe_unused: sk is only referenced when CONFIG_BT_RFCOMM_TTY */
	struct sock *sk __maybe_unused = sock->sk;
	int err;

	BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);

	err = bt_sock_ioctl(sock, cmd, arg);

	if (err == -ENOIOCTLCMD) {
#ifdef CONFIG_BT_RFCOMM_TTY
		lock_sock(sk);
		err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg);
		release_sock(sk);
#else
		err = -EOPNOTSUPP;
#endif
	}

	return err;
}
/*
 * Shut down an RFCOMM socket (both directions, regardless of @how)
 * and close the underlying connection.  With SO_LINGER set, waits up
 * to sk_lingertime for the socket to reach BT_CLOSED, except when the
 * calling task is already exiting.
 */
static int rfcomm_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		sk->sk_shutdown = SHUTDOWN_MASK;
		__rfcomm_sock_close(sk);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
		    !(current->flags & PF_EXITING))
			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}
/*
 * Release (close) an RFCOMM socket: shut it down, detach it from the
 * struct socket, and drop the final reference via rfcomm_sock_kill().
 */
static int rfcomm_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = rfcomm_sock_shutdown(sock, 2);

	sock_orphan(sk);
	rfcomm_sock_kill(sk);
	return err;
}
/* ---- RFCOMM core layer callbacks ----
*
* called under rfcomm_lock()
*/
/*
 * Incoming-connection indication from the RFCOMM core (called under
 * rfcomm_lock()).  If a socket is listening on @channel for the
 * session's local address and its backlog has room, create a child
 * socket, queue it for accept(), and hand its DLC back via @d.
 *
 * Returns 1 if the connection was accepted, 0 otherwise.
 */
int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d)
{
	struct sock *sk, *parent;
	bdaddr_t src, dst;
	int result = 0;

	BT_DBG("session %p channel %d", s, channel);

	rfcomm_session_getaddr(s, &src, &dst);

	/* Check if we have socket listening on channel */
	parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src);
	if (!parent)
		return 0;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto done;
	}

	sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC, 0);
	if (!sk)
		goto done;

	bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM);

	rfcomm_sock_init(sk, parent);
	bacpy(&rfcomm_pi(sk)->src, &src);
	bacpy(&rfcomm_pi(sk)->dst, &dst);
	rfcomm_pi(sk)->channel = channel;

	sk->sk_state = BT_CONFIG;
	bt_accept_enqueue(parent, sk);

	/* Accept connection and return socket DLC */
	*d = rfcomm_pi(sk)->dlc;
	result = 1;

done:
	bh_unlock_sock(parent);

	/* Deferred setup: wake the listener so it can decide */
	if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
		parent->sk_state_change(parent);

	return result;
}
/*
 * seq_file show callback: dump src/dst address, state, and channel of
 * every RFCOMM socket for the debugfs "rfcomm" file.
 */
static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;

	read_lock(&rfcomm_sk_list.lock);

	sk_for_each(sk, &rfcomm_sk_list.head) {
		seq_printf(f, "%pMR %pMR %d %d\n",
			   &rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst,
			   sk->sk_state, rfcomm_pi(sk)->channel);
	}

	read_unlock(&rfcomm_sk_list.lock);

	return 0;
}
/* debugfs open callback: bind the show routine via single_open() */
static int rfcomm_sock_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, rfcomm_sock_debugfs_show, inode->i_private);
}
/* File operations for the debugfs "rfcomm" socket-list file */
static const struct file_operations rfcomm_sock_debugfs_fops = {
	.open		= rfcomm_sock_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static struct dentry *rfcomm_sock_debugfs;
/* proto_ops vector wiring the RFCOMM socket callbacks into the
 * generic socket layer; unsupported operations use sock_no_*().
 */
static const struct proto_ops rfcomm_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= rfcomm_sock_release,
	.bind		= rfcomm_sock_bind,
	.connect	= rfcomm_sock_connect,
	.listen		= rfcomm_sock_listen,
	.accept		= rfcomm_sock_accept,
	.getname	= rfcomm_sock_getname,
	.sendmsg	= rfcomm_sock_sendmsg,
	.recvmsg	= rfcomm_sock_recvmsg,
	.shutdown	= rfcomm_sock_shutdown,
	.setsockopt	= rfcomm_sock_setsockopt,
	.getsockopt	= rfcomm_sock_getsockopt,
	.ioctl		= rfcomm_sock_ioctl,
	.poll		= bt_sock_poll,
	.socketpair	= sock_no_socketpair,
	.mmap		= sock_no_mmap
};
/* Socket-family create hook registered with the Bluetooth layer */
static const struct net_proto_family rfcomm_sock_family_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.create		= rfcomm_sock_create
};
/*
 * Module init for the RFCOMM socket layer: register the proto, the
 * BTPROTO_RFCOMM family, the procfs entry, and (optionally) the
 * debugfs file.  On partial failure, previously registered pieces are
 * unwound via the error path.
 */
int __init rfcomm_init_sockets(void)
{
	int err;

	/* sockaddr_rc must fit in a generic sockaddr */
	BUILD_BUG_ON(sizeof(struct sockaddr_rc) > sizeof(struct sockaddr));

	err = proto_register(&rfcomm_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops);
	if (err < 0) {
		BT_ERR("RFCOMM socket layer registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "rfcomm", &rfcomm_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create RFCOMM proc file");
		bt_sock_unregister(BTPROTO_RFCOMM);
		goto error;
	}

	BT_INFO("RFCOMM socket layer initialized");

	/* debugfs is best-effort; skip when unavailable */
	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444,
						  bt_debugfs, NULL,
						  &rfcomm_sock_debugfs_fops);

	return 0;

error:
	proto_unregister(&rfcomm_proto);
	return err;
}
/* Module exit: tear down procfs, debugfs, family, and proto - in the
 * reverse order of rfcomm_init_sockets().
 */
void __exit rfcomm_cleanup_sockets(void)
{
	bt_procfs_cleanup(&init_net, "rfcomm");

	debugfs_remove(rfcomm_sock_debugfs);

	bt_sock_unregister(BTPROTO_RFCOMM);

	proto_unregister(&rfcomm_proto);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_1847_0 |
crossvul-cpp_data_bad_3060_18 | /* user_defined.c: user defined key type
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <keys/user-type.h>
#include <asm/uaccess.h>
#include "internal.h"
static int logon_vet_description(const char *desc);
/*
* user defined keys take an arbitrary string as the description and an
* arbitrary blob of data as the payload
*/
/* "user" key type: arbitrary description string, arbitrary data blob
 * payload, readable back from userspace via the .read op.
 */
struct key_type key_type_user = {
	.name		= "user",
	.preparse	= user_preparse,
	.free_preparse	= user_free_preparse,
	.instantiate	= generic_key_instantiate,
	.update		= user_update,
	.match		= user_match,
	.revoke		= user_revoke,
	.destroy	= user_destroy,
	.describe	= user_describe,
	.read		= user_read,
};

EXPORT_SYMBOL_GPL(key_type_user);
/*
* This key type is essentially the same as key_type_user, but it does
* not define a .read op. This is suitable for storing username and
* password pairs in the keyring that you do not want to be readable
* from userspace.
*/
/* "logon" key type: same ops as "user" but with no .read op (payload
 * is not readable from userspace) and a vetted "prefix:name"
 * description format.
 */
struct key_type key_type_logon = {
	.name		= "logon",
	.preparse	= user_preparse,
	.free_preparse	= user_free_preparse,
	.instantiate	= generic_key_instantiate,
	.update		= user_update,
	.match		= user_match,
	.revoke		= user_revoke,
	.destroy	= user_destroy,
	.describe	= user_describe,
	.vet_description = logon_vet_description,
};
EXPORT_SYMBOL_GPL(key_type_logon);
/*
* Preparse a user defined key payload
*/
/*
 * Preparse a user defined key payload: validate the length (1..32767
 * bytes), copy the data into a freshly allocated user_key_payload and
 * stash it in prep->payload[0] with a matching quota charge.
 *
 * Returns 0, -EINVAL on bad length/missing data, -ENOMEM on OOM.
 */
int user_preparse(struct key_preparsed_payload *prep)
{
	struct user_key_payload *upayload;
	size_t datalen = prep->datalen;

	/* datalen is size_t (unsigned), so "<= 0" only catches zero */
	if (datalen <= 0 || datalen > 32767 || !prep->data)
		return -EINVAL;

	upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
	if (!upayload)
		return -ENOMEM;

	/* attach the data */
	prep->quotalen = datalen;
	prep->payload[0] = upayload;
	upayload->datalen = datalen;
	memcpy(upayload->data, prep->data, datalen);
	return 0;
}

EXPORT_SYMBOL_GPL(user_preparse);
/*
* Free a preparse of a user defined key payload
*/
/* Free the payload allocated by user_preparse() (kfree(NULL) is a no-op) */
void user_free_preparse(struct key_preparsed_payload *prep)
{
	kfree(prep->payload[0]);
}

EXPORT_SYMBOL_GPL(user_free_preparse);
/*
* update a user defined key
* - the key's semaphore is write-locked
*/
int user_update(struct key *key, struct key_preparsed_payload *prep)
{
struct user_key_payload *upayload, *zap;
size_t datalen = prep->datalen;
int ret;
ret = -EINVAL;
if (datalen <= 0 || datalen > 32767 || !prep->data)
goto error;
/* construct a replacement payload */
ret = -ENOMEM;
upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
if (!upayload)
goto error;
upayload->datalen = datalen;
memcpy(upayload->data, prep->data, datalen);
/* check the quota and attach the new data */
zap = upayload;
ret = key_payload_reserve(key, datalen);
if (ret == 0) {
/* attach the new data, displacing the old */
zap = key->payload.data;
rcu_assign_keypointer(key, upayload);
key->expiry = 0;
}
if (zap)
kfree_rcu(zap, rcu);
error:
return ret;
}
EXPORT_SYMBOL_GPL(user_update);
/*
* match users on their name
*/
/* Match a key against a search: exact description string equality. */
int user_match(const struct key *key, const struct key_match_data *match_data)
{
	const char *wanted = match_data->raw_data;

	return !strcmp(key->description, wanted);
}

EXPORT_SYMBOL_GPL(user_match);
/*
* dispose of the links from a revoked keyring
* - called with the key sem write-locked
*/
/*
 * Revoke a user key: release the quota charge and detach and free the
 * payload (RCU-deferred, since readers may still hold a reference).
 * Called with the key sem write-locked.
 */
void user_revoke(struct key *key)
{
	struct user_key_payload *upayload = key->payload.data;

	/* clear the quota */
	key_payload_reserve(key, 0);

	if (upayload) {
		rcu_assign_keypointer(key, NULL);
		kfree_rcu(upayload, rcu);
	}
}

EXPORT_SYMBOL(user_revoke);
/*
* dispose of the data dangling from the corpse of a user key
*/
/* Free the payload dangling from a dead user key (no readers remain,
 * so a plain kfree suffices; kfree(NULL) is a no-op).
 */
void user_destroy(struct key *key)
{
	kfree(key->payload.data);
}

EXPORT_SYMBOL_GPL(user_destroy);
/*
* describe the user key
*/
/* /proc/keys describe op: print the description and, once the key is
 * instantiated, the payload length.
 */
void user_describe(const struct key *key, struct seq_file *m)
{
	seq_puts(m, key->description);
	if (key_is_instantiated(key))
		seq_printf(m, ": %u", key->datalen);
}

EXPORT_SYMBOL_GPL(user_describe);
/*
* read the key data
* - the key's semaphore is read-locked
*/
/*
 * Read the key payload into a userspace buffer (truncating to @buflen
 * if necessary).  Returns the full payload length on success so the
 * caller can size a retry, or -EFAULT on copy failure.
 * - the key's semaphore is read-locked
 */
long user_read(const struct key *key, char __user *buffer, size_t buflen)
{
	struct user_key_payload *upayload;
	long ret;

	upayload = rcu_dereference_key(key);
	ret = upayload->datalen;

	/* we can return the data as is */
	if (buffer && buflen > 0) {
		if (buflen > upayload->datalen)
			buflen = upayload->datalen;

		if (copy_to_user(buffer, upayload->data, buflen) != 0)
			ret = -EFAULT;
	}

	return ret;
}

EXPORT_SYMBOL_GPL(user_read);
/* Vet the description for a "logon" key: require a "prefix:name" form,
 * i.e. a ':' that is present and not the first character.
 */
static int logon_vet_description(const char *desc)
{
	const char *colon = strchr(desc, ':');

	return (colon && colon != desc) ? 0 : -EINVAL;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3060_18 |
crossvul-cpp_data_good_1223_3 | // SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2007 Oracle. All rights reserved.
*/
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "math.h"
#include "dev-replace.h"
#include "sysfs.h"
/* Static attribute table for every btrfs RAID profile, indexed by
 * enum btrfs_raid_types.  Consulted for chunk allocation limits,
 * redundancy (ncopies/nparity), failure tolerance, and user-visible
 * names/flags.
 */
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
	[BTRFS_RAID_RAID10] = {
		.sub_stripes	= 2,
		.dev_stripes	= 1,
		.devs_max	= 0,	/* 0 == as many as possible */
		.devs_min	= 4,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid10",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID10,
		.mindev_error	= BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID1] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 2,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 2,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "raid1",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID1,
		.mindev_error	= BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
	},
	[BTRFS_RAID_DUP] = {
		.sub_stripes	= 1,
		.dev_stripes	= 2,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 2,
		.nparity        = 0,
		.raid_name	= "dup",
		.bg_flag	= BTRFS_BLOCK_GROUP_DUP,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID0] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "raid0",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_SINGLE] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 1,
		.devs_min	= 1,
		.tolerated_failures = 0,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 0,
		.raid_name	= "single",
		.bg_flag	= 0,
		.mindev_error	= 0,
	},
	[BTRFS_RAID_RAID5] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 2,
		.tolerated_failures = 1,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 1,
		.raid_name	= "raid5",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID5,
		.mindev_error	= BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
	},
	[BTRFS_RAID_RAID6] = {
		.sub_stripes	= 1,
		.dev_stripes	= 1,
		.devs_max	= 0,
		.devs_min	= 3,
		.tolerated_failures = 2,
		.devs_increment	= 1,
		.ncopies	= 1,
		.nparity        = 2,
		.raid_name	= "raid6",
		.bg_flag	= BTRFS_BLOCK_GROUP_RAID6,
		.mindev_error	= BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
	},
};
/* Map a RAID type to its printable name, or NULL if out of range. */
const char *get_raid_name(enum btrfs_raid_types type)
{
	return (type < BTRFS_NR_RAID_TYPES) ?
		btrfs_raid_array[type].raid_name : NULL;
}
/*
* Fill @buf with textual description of @bg_flags, no more than @size_buf
* bytes including terminating null byte.
*/
/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags) {
		strcpy(bp, "NONE");
		return;
	}

/* Append "desc|" for each flag present, consuming it from @flags and
 * bailing out when the buffer would overflow.
 */
#define DESCRIBE_FLAG(flag, desc)						\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
	DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	/* Any bits left over are unknown; print them in hex */
	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer
	 */
out_overflow:;
}
static int init_first_rw_device(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
enum btrfs_map_op op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret,
int mirror_num, int need_raid_map);
/*
* Device locking
* ==============
*
* There are several mutexes that protect manipulation of devices and low-level
* structures like chunks but not block groups, extents or files
*
* uuid_mutex (global lock)
* ------------------------
* protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
* the SCAN_DEV ioctl registration or from mount either implicitly (the first
* device) or requested by the device= mount option
*
* the mutex can be very coarse and can cover long-running operations
*
* protects: updates to fs_devices counters like missing devices, rw devices,
* seeding, structure cloning, opening/closing devices at mount/umount time
*
* global::fs_devs - add, remove, updates to the global list
*
* does not protect: manipulation of the fs_devices::devices list!
*
* btrfs_device::name - renames (write side), read is RCU
*
* fs_devices::device_list_mutex (per-fs, with RCU)
* ------------------------------------------------
* protects updates to fs_devices::devices, ie. adding and deleting
*
* simple list traversal with read-only actions can be done with RCU protection
*
* may be used to exclude some operations from running concurrently without any
* modifications to the list (see write_all_supers)
*
* balance_mutex
* -------------
* protects balance structures (status, state) and context accessed from
* several places (internally, ioctl)
*
* chunk_mutex
* -----------
* protects chunks, adding or removing during allocation, trim or when a new
* device is added/removed
*
* cleaner_mutex
* -------------
* a big lock that is held by the cleaner thread and prevents running subvolume
* cleaning together with relocation or delayed iputs
*
*
* Lock nesting
* ============
*
* uuid_mutex
* volume_mutex
* device_list_mutex
* chunk_mutex
* balance_mutex
*
*
* Exclusive operations, BTRFS_FS_EXCL_OP
* ======================================
*
* Maintains the exclusivity of the following operations that apply to the
* whole filesystem and cannot run in parallel.
*
* - Balance (*)
* - Device add
* - Device remove
* - Device replace (*)
* - Resize
*
* The device operations (as above) can be in one of the following states:
*
* - Running state
* - Paused state
* - Completed state
*
* Only device operations marked with (*) can go into the Paused state for the
* following reasons:
*
* - ioctl (only Balance can be Paused through ioctl)
* - filesystem remounted as read-only
* - filesystem unmounted and mounted as read-only
* - system power-cycle and filesystem mounted as read-only
* - filesystem or device errors leading to forced read-only
*
* BTRFS_FS_EXCL_OP flag is set and cleared using atomic operations.
* During the course of Paused state, the BTRFS_FS_EXCL_OP remains set.
* A device operation in Paused or Running state can be canceled or resumed
* either by ioctl (Balance only) or when remounted as read-write.
* BTRFS_FS_EXCL_OP flag is cleared when the device operation is canceled or
* completed.
*/
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
/* Accessor for the global list of per-filesystem btrfs_fs_devices. */
struct list_head *btrfs_get_fs_uuids(void)
{
	return &fs_uuids;
}
/*
* alloc_fs_devices - allocate struct btrfs_fs_devices
* @fsid: if not NULL, copy the UUID to fs_devices::fsid
* @metadata_fsid: if not NULL, copy the UUID to fs_devices::metadata_fsid
*
* Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
* The returned struct is not linked onto any lists and can be destroyed with
* kfree() right away.
*/
/*
 * Allocate and initialize a struct btrfs_fs_devices.
 * @fsid: if not NULL, copied to fs_devices::fsid
 * @metadata_fsid: if not NULL, copied to fs_devices::metadata_uuid;
 *                 otherwise metadata_uuid mirrors @fsid
 *
 * Returns the new struct (unlinked, freeable with kfree()) or
 * ERR_PTR(-ENOMEM).
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
						 const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devs;

	fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
	if (!fs_devs)
		return ERR_PTR(-ENOMEM);

	mutex_init(&fs_devs->device_list_mutex);

	INIT_LIST_HEAD(&fs_devs->devices);
	INIT_LIST_HEAD(&fs_devs->resized_devices);
	INIT_LIST_HEAD(&fs_devs->alloc_list);
	INIT_LIST_HEAD(&fs_devs->fs_list);
	if (fsid)
		memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

	if (metadata_fsid)
		memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
	else if (fsid)
		memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

	return fs_devs;
}
/* Free a btrfs_device together with its RCU name and preallocated
 * flush bio.
 */
void btrfs_free_device(struct btrfs_device *device)
{
	rcu_string_free(device->name);
	bio_put(device->flush_bio);
	kfree(device);
}
/* Free a btrfs_fs_devices and every device still on its list.
 * Must not be called while the fs_devices is open.
 */
static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device;
	WARN_ON(fs_devices->opened);
	while (!list_empty(&fs_devices->devices)) {
		device = list_entry(fs_devices->devices.next,
				    struct btrfs_device, dev_list);
		list_del(&device->dev_list);
		btrfs_free_device(device);
	}
	kfree(fs_devices);
}
/* Emit a kobject uevent for @bdev's disk; failures are only logged. */
static void btrfs_kobject_uevent(struct block_device *bdev,
				 enum kobject_action action)
{
	int ret;

	ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action);
	if (ret)
		pr_warn("BTRFS: Sending event '%d' to kobject: '%s' (%p): failed\n",
			action,
			kobject_name(&disk_to_dev(bdev->bd_disk)->kobj),
			&disk_to_dev(bdev->bd_disk)->kobj);
}
/* Module exit: drain and free every fs_devices on the global list. */
void __exit btrfs_cleanup_fs_uuids(void)
{
	struct btrfs_fs_devices *fs_devices;

	while (!list_empty(&fs_uuids)) {
		fs_devices = list_entry(fs_uuids.next,
					struct btrfs_fs_devices, fs_list);
		list_del(&fs_devices->fs_list);
		free_fs_devices(fs_devices);
	}
}
/*
* Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
* Returned struct is not linked onto any lists and must be destroyed using
* btrfs_free_device.
*/
/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(void)
{
	struct btrfs_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	/*
	 * Preallocate a bio that's always going to be used for flushing device
	 * barriers and matches the device lifespan
	 */
	dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
	if (!dev->flush_bio) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_alloc_list);
	INIT_LIST_HEAD(&dev->resized_list);

	spin_lock_init(&dev->io_lock);

	atomic_set(&dev->reada_in_flight, 0);
	atomic_set(&dev->dev_stats_ccnt, 0);
	btrfs_device_data_ordered_init(dev);
	/* readahead radix trees must not recurse into reclaim */
	INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
	INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);

	return dev;
}
/*
 * Look up the fs_devices matching @fsid (and @metadata_fsid, when the
 * filesystem uses a split metadata UUID).  The two extra scans handle
 * devices caught mid-way through an fsid change (CHANGING_FSID_V2).
 * Returns NULL if no match is found.  Caller must hold uuid_mutex.
 */
static noinline struct btrfs_fs_devices *find_fsid(
		const u8 *fsid, const u8 *metadata_fsid)
{
	struct btrfs_fs_devices *fs_devices;

	ASSERT(fsid);

	if (metadata_fsid) {
		/*
		 * Handle scanned device having completed its fsid change but
		 * belonging to a fs_devices that was created by first scanning
		 * a device which didn't have its fsid/metadata_uuid changed
		 * at all and the CHANGING_FSID_V2 flag set.
		 */
		list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
			if (fs_devices->fsid_change &&
			    memcmp(metadata_fsid, fs_devices->fsid,
				   BTRFS_FSID_SIZE) == 0 &&
			    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0) {
				return fs_devices;
			}
		}
		/*
		 * Handle scanned device having completed its fsid change but
		 * belonging to a fs_devices that was created by a device that
		 * has an outdated pair of fsid/metadata_uuid and
		 * CHANGING_FSID_V2 flag set.
		 */
		list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
			if (fs_devices->fsid_change &&
			    memcmp(fs_devices->metadata_uuid,
				   fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
			    memcmp(metadata_fsid, fs_devices->metadata_uuid,
				   BTRFS_FSID_SIZE) == 0) {
				return fs_devices;
			}
		}
	}

	/* Handle non-split brain cases */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		if (metadata_fsid) {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
			    && memcmp(metadata_fsid, fs_devices->metadata_uuid,
				      BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		} else {
			if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
				return fs_devices;
		}
	}
	return NULL;
}
/*
 * Open the block device at @device_path, set the btrfs block size,
 * and read its btrfs superblock into *@bh.  Optionally (@flush)
 * writes back and waits on the device's page cache first.
 * On failure, *bdev and *bh are NULLed and the device is released.
 */
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
		      int flush, struct block_device **bdev,
		      struct buffer_head **bh)
{
	int ret;

	*bdev = blkdev_get_by_path(device_path, flags, holder);

	if (IS_ERR(*bdev)) {
		ret = PTR_ERR(*bdev);
		goto error;
	}

	if (flush)
		filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
	ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
	if (ret) {
		blkdev_put(*bdev, flags);
		goto error;
	}
	invalidate_bdev(*bdev);
	*bh = btrfs_read_dev_super(*bdev);
	if (IS_ERR(*bh)) {
		ret = PTR_ERR(*bh);
		blkdev_put(*bdev, flags);
		goto error;
	}

	return 0;

error:
	*bdev = NULL;
	*bh = NULL;
	return ret;
}
/* Re-attach a previously dequeued bio chain (head..tail) to the front
 * of a pending list, preserving any bios queued in the meantime.
 */
static void requeue_list(struct btrfs_pending_bios *pending_bios,
			struct bio *head, struct bio *tail)
{

	struct bio *old_head;

	old_head = pending_bios->head;
	pending_bios->head = head;
	if (pending_bios->tail)
		tail->bi_next = old_head;
	else
		pending_bios->tail = tail;
}
/*
* we try to collect pending bios for a device so we don't get a large
* number of procs sending bios down to the same device. This greatly
* improves the schedulers ability to collect and merge the bios.
*
* But, it also turns into a long list of bios to process and that is sure
* to eventually make the worker thread block. The solution here is to
* make some progress and then put this work struct back at the end of
* the list if the block device is congested. This way, multiple devices
* can make progress from a single worker thread.
*/
/*
 * Submit all bios queued on @device's pending lists, giving the sync
 * list priority, and requeue ourselves if the backing device becomes
 * congested so other workers can make progress.
 */
static noinline void run_scheduled_bios(struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct bio *pending;
	struct backing_dev_info *bdi;
	struct btrfs_pending_bios *pending_bios;
	struct bio *tail;
	struct bio *cur;
	int again = 0;
	unsigned long num_run;
	unsigned long batch_run = 0;
	unsigned long last_waited = 0;
	int force_reg = 0;
	int sync_pending = 0;
	struct blk_plug plug;

	/*
	 * this function runs all the bios we've collected for
	 * a particular device. We don't want to wander off to
	 * another device without first sending all of these down.
	 * So, setup a plug here and finish it off before we return
	 */
	blk_start_plug(&plug);

	bdi = device->bdev->bd_bdi;

loop:
	spin_lock(&device->io_lock);

loop_lock:
	num_run = 0;

	/* take all the bios off the list at once and process them
	 * later on (without the lock held). But, remember the
	 * tail and other pointers so the bios can be properly reinserted
	 * into the list if we hit congestion
	 */
	/* Alternate between sync and regular lists; force_reg makes sure
	 * the regular list is not starved when sync bios keep arriving. */
	if (!force_reg && device->pending_sync_bios.head) {
		pending_bios = &device->pending_sync_bios;
		force_reg = 1;
	} else {
		pending_bios = &device->pending_bios;
		force_reg = 0;
	}

	pending = pending_bios->head;
	tail = pending_bios->tail;
	WARN_ON(pending && !tail);

	/*
	 * if pending was null this time around, no bios need processing
	 * at all and we can stop. Otherwise it'll loop back up again
	 * and do an additional check so no bios are missed.
	 *
	 * device->running_pending is used to synchronize with the
	 * schedule_bio code.
	 */
	if (device->pending_sync_bios.head == NULL &&
	    device->pending_bios.head == NULL) {
		again = 0;
		device->running_pending = 0;
	} else {
		again = 1;
		device->running_pending = 1;
	}

	pending_bios->head = NULL;
	pending_bios->tail = NULL;

	spin_unlock(&device->io_lock);

	while (pending) {

		/* presumably pairs with a write barrier on the queueing
		 * side so bi_next is seen before the head pointer —
		 * TODO(review): confirm the pairing. */
		rmb();
		/* we want to work on both lists, but do more bios on the
		 * sync list than the regular list
		 */
		if ((num_run > 32 &&
		    pending_bios != &device->pending_sync_bios &&
		    device->pending_sync_bios.head) ||
		   (num_run > 64 && pending_bios == &device->pending_sync_bios &&
		    device->pending_bios.head)) {
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			goto loop_lock;
		}

		cur = pending;
		pending = pending->bi_next;
		cur->bi_next = NULL;

		BUG_ON(atomic_read(&cur->__bi_cnt) == 0);

		/*
		 * if we're doing the sync list, record that our
		 * plug has some sync requests on it
		 *
		 * If we're doing the regular list and there are
		 * sync requests sitting around, unplug before
		 * we add more
		 */
		if (pending_bios == &device->pending_sync_bios) {
			sync_pending = 1;
		} else if (sync_pending) {
			blk_finish_plug(&plug);
			blk_start_plug(&plug);
			sync_pending = 0;
		}

		btrfsic_submit_bio(cur);
		num_run++;
		batch_run++;

		cond_resched();

		/*
		 * we made progress, there is more work to do and the bdi
		 * is now congested. Back off and let other work structs
		 * run instead
		 */
		if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
		    fs_info->fs_devices->open_devices > 1) {
			struct io_context *ioc;

			ioc = current->io_context;

			/*
			 * the main goal here is that we don't want to
			 * block if we're going to be able to submit
			 * more requests without blocking.
			 *
			 * This code does two great things, it pokes into
			 * the elevator code from a filesystem _and_
			 * it makes assumptions about how batching works.
			 */
			if (ioc && ioc->nr_batch_requests > 0 &&
			    time_before(jiffies, ioc->last_waited + HZ/50UL) &&
			    (last_waited == 0 ||
			     ioc->last_waited == last_waited)) {
				/*
				 * we want to go through our batch of
				 * requests and stop. So, we copy out
				 * the ioc->last_waited time and test
				 * against it before looping
				 */
				last_waited = ioc->last_waited;
				cond_resched();
				continue;
			}
			/* Congested and out of batch budget: requeue what
			 * is left and hand the device back to the worker
			 * pool. */
			spin_lock(&device->io_lock);
			requeue_list(pending_bios, pending, tail);
			device->running_pending = 1;

			spin_unlock(&device->io_lock);
			btrfs_queue_work(fs_info->submit_workers,
					 &device->work);
			goto done;
		}
	}

	cond_resched();
	if (again)
		goto loop;

	/* Re-check under the lock for bios queued while we were draining. */
	spin_lock(&device->io_lock);
	if (device->pending_bios.head || device->pending_sync_bios.head)
		goto loop_lock;
	spin_unlock(&device->io_lock);

done:
	blk_finish_plug(&plug);
}
/* Work-queue entry point: drain the pending bio lists of the owning device. */
static void pending_bios_fn(struct btrfs_work *work)
{
	run_scheduled_bios(container_of(work, struct btrfs_device, work));
}
/*
 * Return true when @device's registered name equals @path.  The name is
 * an RCU-protected string, so the comparison runs under rcu_read_lock().
 */
static bool device_path_matched(const char *path, struct btrfs_device *device)
{
	bool match;

	rcu_read_lock();
	match = (strcmp(rcu_str_deref(device->name), path) == 0);
	rcu_read_unlock();

	return match;
}
/*
* Search and remove all stale (devices which are not mounted) devices.
* When both inputs are NULL, it will search and release all stale devices.
* path: Optional. When provided will it release all unmounted devices
* matching this path only.
* skip_dev: Optional. Will skip this device when searching for the stale
* devices.
* Return: 0 for success or if @path is NULL.
* -EBUSY if @path is a mounted device.
* -ENOENT if @path does not match any device in the list.
*/
static int btrfs_free_stale_devices(const char *path,
				     struct btrfs_device *skip_device)
{
	struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
	struct btrfs_device *device, *tmp_device;
	int ret = 0;

	/* Assume "no match" until we actually free a device for @path. */
	if (path)
		ret = -ENOENT;

	list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {

		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry_safe(device, tmp_device,
					 &fs_devices->devices, dev_list) {
			if (skip_device && skip_device == device)
				continue;
			if (path && !device->name)
				continue;
			if (path && !device_path_matched(path, device))
				continue;
			if (fs_devices->opened) {
				/* for an already deleted device return 0 */
				if (path && ret != 0)
					ret = -EBUSY;
				break;
			}

			/* delete the stale device */
			fs_devices->num_devices--;
			list_del(&device->dev_list);
			btrfs_free_device(device);

			ret = 0;
			if (fs_devices->num_devices == 0)
				break;
		}
		mutex_unlock(&fs_devices->device_list_mutex);

		/* Drop the whole fs_devices once its last device is gone. */
		if (fs_devices->num_devices == 0) {
			btrfs_sysfs_remove_fsid(fs_devices);
			list_del(&fs_devices->fs_list);
			free_fs_devices(fs_devices);
		}
	}

	return ret;
}
/*
 * Open the block device backing @device, verify its super block matches
 * the registered devid/uuid, and wire the device into @fs_devices'
 * open/rw accounting.  Returns 0 on success, -EINVAL on any mismatch or
 * open failure.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
			struct btrfs_device *device, fmode_t flags,
			void *holder)
{
	struct request_queue *q;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	u64 devid;
	int ret;

	/* Already open, or nothing to open. */
	if (device->bdev)
		return -EINVAL;
	if (!device->name)
		return -EINVAL;

	ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
				    &bdev, &bh);
	if (ret)
		return ret;

	/* The on-disk super must agree with what was registered at scan
	 * time, otherwise the device was swapped underneath us. */
	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	if (devid != device->devid)
		goto error_brelse;

	if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
		goto error_brelse;

	device->generation = btrfs_super_generation(disk_super);

	if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
		/* A seed device with METADATA_UUID is an invalid combination. */
		if (btrfs_super_incompat_flags(disk_super) &
		    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
			pr_err(
		"BTRFS: Invalid seeding and uuid-changed device detected\n");
			goto error_brelse;
		}

		clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		fs_devices->seeding = 1;
	} else {
		if (bdev_read_only(bdev))
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
		else
			set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	}

	q = bdev_get_queue(bdev);
	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	device->bdev = bdev;
	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	device->mode = flags;

	fs_devices->open_devices++;
	/* Replace targets are writeable but never allocation candidates. */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		fs_devices->rw_devices++;
		list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
	}
	brelse(bh);

	return 0;

error_brelse:
	brelse(bh);
	blkdev_put(bdev, flags);

	return -EINVAL;
}
/*
* Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
* being created with a disk that has already completed its fsid change.
*/
/*
 * Find a registered fs_devices whose completed fsid change left its
 * metadata_uuid equal to the scanned disk's fsid, while the scanned disk
 * still carries the CHANGING_FSID_V2 flag.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* Only candidates that have diverged fsid/metadata_uuid
		 * and are not themselves mid-change qualify. */
		if (fs_devices->fsid_change)
			continue;
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0)
			continue;
		if (memcmp(fs_devices->metadata_uuid, disk_super->fsid,
			   BTRFS_FSID_SIZE) == 0)
			return fs_devices;
	}

	return NULL;
}
static struct btrfs_fs_devices *find_fsid_changed(
					struct btrfs_super_block *disk_super)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Handles the case where scanned device is part of an fs that had
	 * multiple successful changes of FSID but curently device didn't
	 * observe it. Meaning our fsid will be different than theirs.
	 */
	list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
		/* fs must have diverged fsid/metadata_uuid ... */
		if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
			   BTRFS_FSID_SIZE) == 0)
			continue;
		/* ... share our metadata_uuid ... */
		if (memcmp(fs_devices->metadata_uuid,
			   disk_super->metadata_uuid, BTRFS_FSID_SIZE) != 0)
			continue;
		/* ... and carry a different (stale, pre-change) fsid. */
		if (memcmp(fs_devices->fsid, disk_super->fsid,
			   BTRFS_FSID_SIZE) != 0)
			return fs_devices;
	}

	return NULL;
}
/*
* Add new device to list of registered devices
*
* Returns:
* device pointer which was just added or updated when successful
* error pointer when failed
*/
static noinline struct btrfs_device *device_list_add(const char *path,
			   struct btrfs_super_block *disk_super,
			   bool *new_device_added)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = NULL;
	struct rcu_string *name;
	u64 found_transid = btrfs_super_generation(disk_super);
	u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
	bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
		BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
	bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
					BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

	/* Locate (or later create) the fs_devices this disk belongs to,
	 * accounting for in-progress and completed fsid changes. */
	if (fsid_change_in_progress) {
		if (!has_metadata_uuid) {
			/*
			 * When we have an image which has CHANGING_FSID_V2 set
			 * it might belong to either a filesystem which has
			 * disks with completed fsid change or it might belong
			 * to fs with no UUID changes in effect, handle both.
			 */
			fs_devices = find_fsid_inprogress(disk_super);
			if (!fs_devices)
				fs_devices = find_fsid(disk_super->fsid, NULL);
		} else {
			fs_devices = find_fsid_changed(disk_super);
		}
	} else if (has_metadata_uuid) {
		fs_devices = find_fsid(disk_super->fsid,
				       disk_super->metadata_uuid);
	} else {
		fs_devices = find_fsid(disk_super->fsid, NULL);
	}


	if (!fs_devices) {
		/* First disk of this filesystem we have ever seen. */
		if (has_metadata_uuid)
			fs_devices = alloc_fs_devices(disk_super->fsid,
						      disk_super->metadata_uuid);
		else
			fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

		if (IS_ERR(fs_devices))
			return ERR_CAST(fs_devices);

		fs_devices->fsid_change = fsid_change_in_progress;

		mutex_lock(&fs_devices->device_list_mutex);
		list_add(&fs_devices->fs_list, &fs_uuids);

		device = NULL;
	} else {
		mutex_lock(&fs_devices->device_list_mutex);
		device = btrfs_find_device(fs_devices, devid,
				disk_super->dev_item.uuid, NULL, false);

		/*
		 * If this disk has been pulled into an fs devices created by
		 * a device which had the CHANGING_FSID_V2 flag then replace the
		 * metadata_uuid/fsid values of the fs_devices.
		 */
		if (has_metadata_uuid && fs_devices->fsid_change &&
		    found_transid > fs_devices->latest_generation) {
			memcpy(fs_devices->fsid, disk_super->fsid,
					BTRFS_FSID_SIZE);
			memcpy(fs_devices->metadata_uuid,
					disk_super->metadata_uuid, BTRFS_FSID_SIZE);

			fs_devices->fsid_change = false;
		}
	}

	if (!device) {
		/* Register a brand new device for this devid. */
		if (fs_devices->opened) {
			/* Cannot add devices to a mounted filesystem here. */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EBUSY);
		}

		device = btrfs_alloc_device(NULL, &devid,
					    disk_super->dev_item.uuid);
		if (IS_ERR(device)) {
			mutex_unlock(&fs_devices->device_list_mutex);
			/* we can safely leave the fs_devices entry around */
			return device;
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			btrfs_free_device(device);
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_assign_pointer(device->name, name);

		list_add_rcu(&device->dev_list, &fs_devices->devices);
		fs_devices->num_devices++;

		device->fs_devices = fs_devices;
		*new_device_added = true;

		if (disk_super->label[0])
			pr_info("BTRFS: device label %s devid %llu transid %llu %s\n",
				disk_super->label, devid, found_transid, path);
		else
			pr_info("BTRFS: device fsid %pU devid %llu transid %llu %s\n",
				disk_super->fsid, devid, found_transid, path);

	} else if (!device->name || strcmp(device->name->str, path)) {
		/*
		 * When FS is already mounted.
		 * 1. If you are here and if the device->name is NULL that
		 *    means this device was missing at time of FS mount.
		 * 2. If you are here and if the device->name is different
		 *    from 'path' that means either
		 *      a. The same device disappeared and reappeared with
		 *         different name. or
		 *      b. The missing-disk-which-was-replaced, has
		 *         reappeared now.
		 *
		 * We must allow 1 and 2a above. But 2b would be a spurious
		 * and unintentional.
		 *
		 * Further in case of 1 and 2a above, the disk at 'path'
		 * would have missed some transaction when it was away and
		 * in case of 2a the stale bdev has to be updated as well.
		 * 2b must not be allowed at all time.
		 */

		/*
		 * For now, we do allow update to btrfs_fs_device through the
		 * btrfs dev scan cli after FS has been mounted.  We're still
		 * tracking a problem where systems fail mount by subvolume id
		 * when we reject replacement on a mounted FS.
		 */
		if (!fs_devices->opened && found_transid < device->generation) {
			/*
			 * That is if the FS is _not_ mounted and if you
			 * are here, that means there is more than one
			 * disk with same uuid and devid.We keep the one
			 * with larger generation number or the last-in if
			 * generation are equal.
			 */
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-EEXIST);
		}

		/*
		 * We are going to replace the device path for a given devid,
		 * make sure it's the same device if the device is mounted
		 */
		if (device->bdev) {
			struct block_device *path_bdev;

			path_bdev = lookup_bdev(path);
			if (IS_ERR(path_bdev)) {
				mutex_unlock(&fs_devices->device_list_mutex);
				return ERR_CAST(path_bdev);
			}

			if (device->bdev != path_bdev) {
				/* A different physical device claims the same
				 * fsid:devid — refuse the path update. */
				bdput(path_bdev);
				mutex_unlock(&fs_devices->device_list_mutex);
				btrfs_warn_in_rcu(device->fs_info,
			"duplicate device fsid:devid for %pU:%llu old:%s new:%s",
					disk_super->fsid, devid,
					rcu_str_deref(device->name), path);
				return ERR_PTR(-EEXIST);
			}
			bdput(path_bdev);
			btrfs_info_in_rcu(device->fs_info,
				"device fsid %pU devid %llu moved old:%s new:%s",
				disk_super->fsid, devid,
				rcu_str_deref(device->name), path);
		}

		name = rcu_string_strdup(path, GFP_NOFS);
		if (!name) {
			mutex_unlock(&fs_devices->device_list_mutex);
			return ERR_PTR(-ENOMEM);
		}
		rcu_string_free(device->name);
		rcu_assign_pointer(device->name, name);
		if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			fs_devices->missing_devices--;
			clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}
	}

	/*
	 * Unmount does not free the btrfs_device struct but would zero
	 * generation along with most of the other members. So just update
	 * it back. We need it to pick the disk with largest generation
	 * (as above).
	 */
	if (!fs_devices->opened) {
		device->generation = found_transid;
		fs_devices->latest_generation = max_t(u64, found_transid,
						fs_devices->latest_generation);
	}

	fs_devices->total_devices = btrfs_super_num_devices(disk_super);

	mutex_unlock(&fs_devices->device_list_mutex);
	return device;
}
/*
 * Create a deep copy of @orig: a new fs_devices with freshly allocated
 * btrfs_device structs (devid/uuid/name copied, no open bdevs).  Used so
 * a mounted filesystem can keep its own private device list.  Returns
 * the clone or ERR_PTR(-ENOMEM).
 */
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
	struct btrfs_fs_devices *fs_devices;
	struct btrfs_device *device;
	struct btrfs_device *orig_dev;

	fs_devices = alloc_fs_devices(orig->fsid, NULL);
	if (IS_ERR(fs_devices))
		return fs_devices;

	mutex_lock(&orig->device_list_mutex);
	fs_devices->total_devices = orig->total_devices;

	/* We have held the volume lock, it is safe to get the devices. */
	list_for_each_entry(orig_dev, &orig->devices, dev_list) {
		struct rcu_string *name;

		device = btrfs_alloc_device(NULL, &orig_dev->devid,
					    orig_dev->uuid);
		if (IS_ERR(device))
			goto error;

		/*
		 * This is ok to do without rcu read locked because we hold the
		 * uuid mutex so nothing we touch in here is going to disappear.
		 */
		if (orig_dev->name) {
			name = rcu_string_strdup(orig_dev->name->str,
					GFP_KERNEL);
			if (!name) {
				btrfs_free_device(device);
				goto error;
			}
			rcu_assign_pointer(device->name, name);
		}

		list_add(&device->dev_list, &fs_devices->devices);
		device->fs_devices = fs_devices;
		fs_devices->num_devices++;
	}
	mutex_unlock(&orig->device_list_mutex);
	return fs_devices;
error:
	mutex_unlock(&orig->device_list_mutex);
	/* free_fs_devices() also frees any devices cloned so far. */
	free_fs_devices(fs_devices);
	return ERR_PTR(-ENOMEM);
}
/*
* After we have read the system tree and know devids belonging to
* this filesystem, remove the device which does not belong there.
*/
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
	struct btrfs_device *device, *next;
	struct btrfs_device *latest_dev = NULL;

	mutex_lock(&uuid_mutex);
again:
	/* This is the initialized path, it is safe to release the devices. */
	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
		if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
							&device->dev_state)) {
			/* Track the non-replace-target device with the
			 * highest generation; it becomes latest_bdev. */
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state) &&
			     (!latest_dev ||
			      device->generation > latest_dev->generation)) {
				latest_dev = device;
			}
			continue;
		}

		if (device->devid == BTRFS_DEV_REPLACE_DEVID) {
			/*
			 * In the first step, keep the device which has
			 * the correct fsid and the devid that is used
			 * for the dev_replace procedure.
			 * In the second step, the dev_replace state is
			 * read from the device tree and it is known
			 * whether the procedure is really active or
			 * not, which means whether this device is
			 * used or whether it should be removed.
			 */
			if (step == 0 || test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
						  &device->dev_state)) {
				continue;
			}
		}
		if (device->bdev) {
			blkdev_put(device->bdev, device->mode);
			device->bdev = NULL;
			fs_devices->open_devices--;
		}
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			list_del_init(&device->dev_alloc_list);
			clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
			if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
				      &device->dev_state))
				fs_devices->rw_devices--;
		}
		list_del_init(&device->dev_list);
		fs_devices->num_devices--;
		btrfs_free_device(device);
	}

	/* Walk the seed device chain as well. */
	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	/* NOTE(review): latest_dev is dereferenced unconditionally; if no
	 * device had IN_FS_METADATA set this would be a NULL deref —
	 * presumably callers guarantee at least one.  TODO confirm. */
	fs_devices->latest_bdev = latest_dev->bdev;

	mutex_unlock(&uuid_mutex);
}
/* RCU callback: release a btrfs_device after the grace period ends. */
static void free_device_rcu(struct rcu_head *head)
{
	btrfs_free_device(container_of(head, struct btrfs_device, rcu));
}
/*
 * Close the block device backing @device, flushing and invalidating its
 * page cache first when the device was writeable.  No-op without a bdev.
 */
static void btrfs_close_bdev(struct btrfs_device *device)
{
	struct block_device *bdev = device->bdev;

	if (!bdev)
		return;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		sync_blockdev(bdev);
		invalidate_bdev(bdev);
	}

	blkdev_put(bdev, device->mode);
}
/*
 * Close one device during unmount: drop its open/rw/missing accounting,
 * close the bdev, and swap the struct out of the device list for a fresh
 * placeholder (keeping devid/uuid/name) so concurrent RCU readers keep
 * seeing a valid entry; the old struct is freed after a grace period.
 */
static void btrfs_close_one_device(struct btrfs_device *device)
{
	struct btrfs_fs_devices *fs_devices = device->fs_devices;
	struct btrfs_device *new_device;
	struct rcu_string *name;

	if (device->bdev)
		fs_devices->open_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    device->devid != BTRFS_DEV_REPLACE_DEVID) {
		list_del_init(&device->dev_alloc_list);
		fs_devices->rw_devices--;
	}

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		fs_devices->missing_devices--;

	btrfs_close_bdev(device);

	/* Allocation failure is treated as fatal here; close cannot fail. */
	new_device = btrfs_alloc_device(NULL, &device->devid,
					device->uuid);
	BUG_ON(IS_ERR(new_device)); /* -ENOMEM */

	/* Safe because we are under uuid_mutex */
	if (device->name) {
		name = rcu_string_strdup(device->name->str, GFP_NOFS);
		BUG_ON(!name); /* -ENOMEM */
		rcu_assign_pointer(new_device->name, name);
	}

	list_replace_rcu(&device->dev_list, &new_device->dev_list);
	new_device->fs_devices = device->fs_devices;

	call_rcu(&device->rcu, free_device_rcu);
}
/*
 * Drop one open reference on @fs_devices; when the count reaches zero,
 * close every member device and reset the open/seeding state.  Always
 * returns 0.
 */
static int close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_device *device, *tmp;

	/* Still referenced by other openers — nothing to tear down yet. */
	if (--fs_devices->opened > 0)
		return 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list) {
		btrfs_close_one_device(device);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	/* All devices were just closed, so the counters must be zero. */
	WARN_ON(fs_devices->open_devices);
	WARN_ON(fs_devices->rw_devices);
	fs_devices->opened = 0;
	fs_devices->seeding = 0;

	return 0;
}
/*
 * Public close path: close @fs_devices under uuid_mutex and, if that was
 * the last opener, detach and fully close/free the whole seed chain.
 */
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
	struct btrfs_fs_devices *seed_devices = NULL;
	int ret;

	mutex_lock(&uuid_mutex);
	ret = close_fs_devices(fs_devices);
	if (!fs_devices->opened) {
		/* Take ownership of the seed chain before dropping the lock. */
		seed_devices = fs_devices->seed;
		fs_devices->seed = NULL;
	}
	mutex_unlock(&uuid_mutex);

	while (seed_devices) {
		fs_devices = seed_devices;
		seed_devices = fs_devices->seed;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
	return ret;
}
/*
 * Open every device in @fs_devices (best effort; individual failures are
 * skipped), record the device with the highest generation as latest_bdev,
 * and mark the set opened.  Returns -EINVAL if no device could be opened.
 */
static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
				fmode_t flags, void *holder)
{
	struct btrfs_device *device;
	struct btrfs_device *latest_dev = NULL;
	int ret = 0;

	flags |= FMODE_EXCL;

	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* Just open everything we can; ignore failures here */
		if (btrfs_open_one_device(fs_devices, device, flags, holder))
			continue;

		if (!latest_dev ||
		    device->generation > latest_dev->generation)
			latest_dev = device;
	}
	if (fs_devices->open_devices == 0) {
		ret = -EINVAL;
		goto out;
	}
	fs_devices->opened = 1;
	/* open_devices > 0 guarantees latest_dev was set above. */
	fs_devices->latest_bdev = latest_dev->bdev;
	fs_devices->total_rw_bytes = 0;
out:
	return ret;
}
/* list_sort() comparator: order btrfs_devices by ascending devid. */
static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_device *da = list_entry(a, struct btrfs_device, dev_list);
	struct btrfs_device *db = list_entry(b, struct btrfs_device, dev_list);

	if (da->devid > db->devid)
		return 1;
	if (da->devid < db->devid)
		return -1;
	return 0;
}
/*
 * Open @fs_devices (or take another reference if already open).  Devices
 * are sorted by devid before the first open so later iteration order is
 * deterministic.  Caller must hold uuid_mutex.
 */
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       fmode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	return ret;
}
/* Undo btrfs_read_disk_super(): unmap the page and drop its reference. */
static void btrfs_release_disk_super(struct page *page)
{
	kunmap(page);
	put_page(page);
}
/*
 * Read the super block at byte offset @bytenr through the bdev page
 * cache (the scan path may not call set_blocksize).  On success *page is
 * the mapped page (release with btrfs_release_disk_super()) and
 * *disk_super points into it.  Returns 0 on success, 1 on any failure
 * or if no valid btrfs super is found.
 */
static int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr,
				 struct page **page,
				 struct btrfs_super_block **disk_super)
{
	void *p;
	pgoff_t index;

	/* make sure our super fits in the device */
	if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
		return 1;

	/* make sure our super fits in the page */
	if (sizeof(**disk_super) > PAGE_SIZE)
		return 1;

	/* make sure our super doesn't straddle pages on disk */
	index = bytenr >> PAGE_SHIFT;
	if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index)
		return 1;

	/* pull in the page with our super */
	*page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   index, GFP_KERNEL);

	if (IS_ERR_OR_NULL(*page))
		return 1;

	p = kmap(*page);

	/* align our pointer to the offset of the super block */
	*disk_super = p + offset_in_page(bytenr);

	/* Validate location and magic before trusting anything else. */
	if (btrfs_super_bytenr(*disk_super) != bytenr ||
	    btrfs_super_magic(*disk_super) != BTRFS_MAGIC) {
		btrfs_release_disk_super(*page);
		return 1;
	}

	/* Force NUL termination on a label that fills the whole buffer. */
	if ((*disk_super)->label[0] &&
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1])
		(*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0';

	return 0;
}
/*
* Look for a btrfs signature on a device. This may be called out of the mount path
* and we are not allowed to call set_blocksize during the scan. The superblock
* is read via pagecache
*/
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
					   void *holder)
{
	struct btrfs_super_block *disk_super;
	bool new_device_added = false;
	struct btrfs_device *device = NULL;
	struct block_device *bdev;
	struct page *page;
	u64 bytenr;

	lockdep_assert_held(&uuid_mutex);

	/*
	 * we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	bytenr = btrfs_sb_offset(0);
	flags |= FMODE_EXCL;

	bdev = blkdev_get_by_path(path, flags, holder);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) {
		device = ERR_PTR(-EINVAL);
		goto error_bdev_put;
	}

	device = device_list_add(path, disk_super, &new_device_added);
	if (!IS_ERR(device)) {
		/* A freshly registered path may shadow stale entries for
		 * the same path left over from earlier scans. */
		if (new_device_added)
			btrfs_free_stale_devices(path, device);
	}

	btrfs_release_disk_super(page);

error_bdev_put:
	blkdev_put(bdev, flags);

	return device;
}
/*
 * Check whether [*start, *start + len) on @device overlaps any chunk that
 * is pending in @transaction or pinned in the fs_info.  On overlap,
 * *start is advanced past the conflicting stripe (monotonically — see the
 * comment below) and 1 is returned; otherwise 0.
 */
static int contains_pending_extent(struct btrfs_transaction *transaction,
				   struct btrfs_device *device,
				   u64 *start, u64 len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct extent_map *em;
	struct list_head *search_list = &fs_info->pinned_chunks;
	int ret = 0;
	u64 physical_start = *start;

	if (transaction)
		search_list = &transaction->pending_chunks;
again:
	list_for_each_entry(em, search_list, list) {
		struct map_lookup *map;
		int i;

		map = em->map_lookup;
		for (i = 0; i < map->num_stripes; i++) {
			u64 end;

			if (map->stripes[i].dev != device)
				continue;
			/* Skip stripes entirely outside the queried range. */
			if (map->stripes[i].physical >= physical_start + len ||
			    map->stripes[i].physical + em->orig_block_len <=
			    physical_start)
				continue;
			/*
			 * Make sure that while processing the pinned list we do
			 * not override our *start with a lower value, because
			 * we can have pinned chunks that fall within this
			 * device hole and that have lower physical addresses
			 * than the pending chunks we processed before. If we
			 * do not take this special care we can end up getting
			 * 2 pending chunks that start at the same physical
			 * device offsets because the end offset of a pinned
			 * chunk can be equal to the start offset of some
			 * pending chunk.
			 */
			end = map->stripes[i].physical + em->orig_block_len;
			if (end > *start) {
				*start = end;
				ret = 1;
			}
		}
	}
	/* After pending chunks, also scan the pinned list (second pass). */
	if (search_list != &fs_info->pinned_chunks) {
		search_list = &fs_info->pinned_chunks;
		goto again;
	}

	return ret;
}
}
/*
* find_free_dev_extent_start - find free space in the specified device
* @device: the device which we search the free space in
* @num_bytes: the size of the free space that we need
* @search_start: the position from which to begin the search
* @start: store the start of the free space.
* @len: the size of the free space. that we find, or the size
* of the max free space if we don't find suitable free space
*
* this uses a pretty simple search, the expectation is that it is
* called very infrequently and that a given device has a small number
* of extents
*
* @start is used to store the start of the free space if we find. But if we
* don't find suitable free space, it will be used to store the start position
* of the max free space.
*
* @len is used to store the size of the free space that we find.
* But if we don't find suitable free space, it is used to store the size of
* the max free space.
*/
int find_free_dev_extent_start(struct btrfs_transaction *transaction,
			       struct btrfs_device *device, u64 num_bytes,
			       u64 search_start, u64 *start, u64 *len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	struct btrfs_dev_extent *dev_extent;
	struct btrfs_path *path;
	u64 hole_size;
	u64 max_hole_start;
	u64 max_hole_size;
	u64 extent_end;
	u64 search_end = device->total_bytes;
	int ret;
	int slot;
	struct extent_buffer *l;

	/*
	 * We don't want to overwrite the superblock on the drive nor any area
	 * used by the boot loader (grub for example), so we make sure to start
	 * at an offset of at least 1MB.
	 */
	search_start = max_t(u64, search_start, SZ_1M);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	max_hole_start = search_start;
	max_hole_size = 0;

again:
	if (search_start >= search_end ||
		test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = -ENOSPC;
		goto out;
	}

	/* Read-only walk of the committed dev tree; no locking needed. */
	path->reada = READA_FORWARD;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	key.objectid = device->devid;
	key.offset = search_start;
	key.type = BTRFS_DEV_EXTENT_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		/* Step back to the extent that may cover search_start. */
		ret = btrfs_previous_item(root, path, key.objectid, key.type);
		if (ret < 0)
			goto out;
	}

	/* Walk the device's dev extents in offset order, measuring the
	 * gap (hole) before each one. */
	while (1) {
		l = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto out;

			break;
		}
		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid < device->devid)
			goto next;

		if (key.objectid > device->devid)
			break;

		if (key.type != BTRFS_DEV_EXTENT_KEY)
			goto next;

		if (key.offset > search_start) {
			hole_size = key.offset - search_start;

			/*
			 * Have to check before we set max_hole_start, otherwise
			 * we could end up sending back this offset anyway.
			 */
			if (contains_pending_extent(transaction, device,
						    &search_start,
						    hole_size)) {
				/* search_start moved; recompute the hole. */
				if (key.offset >= search_start) {
					hole_size = key.offset - search_start;
				} else {
					WARN_ON_ONCE(1);
					hole_size = 0;
				}
			}

			if (hole_size > max_hole_size) {
				max_hole_start = search_start;
				max_hole_size = hole_size;
			}

			/*
			 * If this free space is greater than which we need,
			 * it must be the max free space that we have found
			 * until now, so max_hole_start must point to the start
			 * of this free space and the length of this free space
			 * is stored in max_hole_size. Thus, we return
			 * max_hole_start and max_hole_size and go back to the
			 * caller.
			 */
			if (hole_size >= num_bytes) {
				ret = 0;
				goto out;
			}
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		extent_end = key.offset + btrfs_dev_extent_length(l,
								  dev_extent);
		if (extent_end > search_start)
			search_start = extent_end;
next:
		path->slots[0]++;
		cond_resched();
	}

	/*
	 * At this point, search_start should be the end of
	 * allocated dev extents, and when shrinking the device,
	 * search_end may be smaller than search_start.
	 */
	if (search_end > search_start) {
		hole_size = search_end - search_start;

		if (contains_pending_extent(transaction, device, &search_start,
					    hole_size)) {
			/* Pending chunk moved the start: restart the scan. */
			btrfs_release_path(path);
			goto again;
		}

		if (hole_size > max_hole_size) {
			max_hole_start = search_start;
			max_hole_size = hole_size;
		}
	}

	/* See above. */
	if (max_hole_size < num_bytes)
		ret = -ENOSPC;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	/* Even on -ENOSPC, report the largest hole found for callers
	 * that want the best-effort result. */
	*start = max_hole_start;
	if (len)
		*len = max_hole_size;
	return ret;
}
/*
 * Convenience wrapper: search the whole device (from offset 0) for a
 * free extent of @num_bytes.  See find_free_dev_extent_start().
 */
int find_free_dev_extent(struct btrfs_trans_handle *trans,
			 struct btrfs_device *device, u64 num_bytes,
			 u64 *start, u64 *len)
{
	/* FIXME use last free of some kind */
	return find_free_dev_extent_start(trans->transaction, device,
					  num_bytes, 0, start, len);
}
/*
 * Delete the dev extent item covering @start on @device and return its
 * length via @dev_extent_len.  The key may not sit exactly at @start, so
 * the lookup steps back to the previous item and retries with its key.
 */
static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
			  struct btrfs_device *device,
			  u64 start, u64 *dev_extent_len)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf = NULL;
	struct btrfs_dev_extent *extent = NULL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
again:
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0) {
		/* No exact match: the covering extent starts earlier. */
		ret = btrfs_previous_item(root, path, key.objectid,
					  BTRFS_DEV_EXTENT_KEY);
		if (ret)
			goto out;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
		/* The found extent must actually cover @start. */
		BUG_ON(found_key.offset > start || found_key.offset +
		       btrfs_dev_extent_length(leaf, extent) < start);
		key = found_key;
		btrfs_release_path(path);
		goto again;
	} else if (ret == 0) {
		leaf = path->nodes[0];
		extent = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_dev_extent);
	} else {
		btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
		goto out;
	}

	*dev_extent_len = btrfs_dev_extent_length(leaf, extent);

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to remove dev extent item");
	} else {
		set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
	}
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Insert a dev extent item for @device mapping the physical range
 * [start, start + num_bytes) to the chunk at @chunk_offset.
 */
static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_device *device,
				  u64 chunk_offset, u64 start, u64 num_bytes)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_dev_extent *extent;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	/* The device must be live and not the target of a replace. */
	WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
	WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = device->devid;
	key.offset = start;
	key.type = BTRFS_DEV_EXTENT_KEY;
	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	extent = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_dev_extent);
	btrfs_set_dev_extent_chunk_tree(leaf, extent,
					BTRFS_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_objectid(leaf, extent,
					    BTRFS_FIRST_CHUNK_TREE_OBJECTID);
	btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

	btrfs_set_dev_extent_length(leaf, extent, num_bytes);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Return the logical offset right after the highest mapped chunk, i.e.
 * where the next chunk can start.  Returns 0 when no chunk mapping
 * exists yet.
 */
static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
	struct rb_node *last;
	u64 next = 0;

	read_lock(&em_tree->lock);
	/* the rightmost node in the mapping tree is the highest chunk */
	last = rb_last(&em_tree->map.rb_root);
	if (last) {
		struct extent_map *em;

		em = rb_entry(last, struct extent_map, rb_node);
		next = em->start + em->len;
	}
	read_unlock(&em_tree->lock);

	return next;
}
/*
 * Find the next free device id by locating the last (highest) dev item
 * in the chunk tree.  Stores that devid + 1 in *devid_ret, or 1 when no
 * dev item exists yet.  Returns 0 on success or a negative errno from
 * the tree search.
 */
static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
				    u64 *devid_ret)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* search for the largest possible devid, then step back one item */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
	if (ret < 0)
		goto error;

	/* an exact match on offset (u64)-1 can only mean a corrupted tree */
	BUG_ON(ret == 0); /* Corruption */

	ret = btrfs_previous_item(fs_info->chunk_root, path,
				  BTRFS_DEV_ITEMS_OBJECTID,
				  BTRFS_DEV_ITEM_KEY);
	if (ret) {
		/* no dev items at all: the first devid handed out is 1 */
		*devid_ret = 1;
	} else {
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		*devid_ret = found_key.offset + 1;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 *
 * Inserts a new dev item for @device (keyed by its devid) and copies
 * all geometry/usage fields plus the device and filesystem UUIDs into
 * it.  Returns 0 on success or a negative errno.
 */
static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
			      struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* dev items live under (DEV_ITEMS_OBJECTID, DEV_ITEM, devid) */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
				      &key, sizeof(*dev_item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	/* mirror the in-memory device state into the on-disk item */
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_generation(leaf, dev_item, 0);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_set_device_group(leaf, dev_item, 0);
	btrfs_set_device_seek_speed(leaf, dev_item, 0);
	btrfs_set_device_bandwidth(leaf, dev_item, 0);
	btrfs_set_device_start_offset(leaf, dev_item, 0);

	/* embed the per-device and filesystem (metadata) UUIDs */
	ptr = btrfs_device_uuid(dev_item);
	write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
	ptr = btrfs_device_fsid(dev_item);
	write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
			    ptr, BTRFS_FSID_SIZE);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Function to update ctime/mtime for a given device path.
 * Mainly used for ctime/mtime based probe like libblkid.
 * Failures to open the path are silently ignored (best effort).
 */
static void update_dev_time(const char *path_name)
{
	struct file *filp = filp_open(path_name, O_RDWR, 0);

	if (!IS_ERR(filp)) {
		file_update_time(filp);
		filp_close(filp, NULL);
	}
}
/*
 * Delete the dev item for @device from the chunk tree inside its own
 * transaction.  The transaction is committed on success and aborted on
 * any failure.  Returns 0 on success or a negative errno (-ENOENT when
 * the item was not found).
 */
static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
			     struct btrfs_device *device)
{
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_trans_handle *trans;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret) {
		/* ret > 0 means the item does not exist */
		if (ret > 0)
			ret = -ENOENT;
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_path(path);
	/* on success the transaction is still open; commit it here */
	if (!ret)
		ret = btrfs_commit_transaction(trans);
	return ret;
}
/*
 * Verify that @num_devices satisfies the RAID profile constraints in the whole
 * filesystem. It's up to the caller to adjust that number regarding eg. device
 * replace.
 */
static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
					u64 num_devices)
{
	u64 profile_bits;
	unsigned int seq;
	int i;

	/* take a consistent snapshot of the allocation profile bits */
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		profile_bits = fs_info->avail_data_alloc_bits |
			       fs_info->avail_system_alloc_bits |
			       fs_info->avail_metadata_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
		if (!(profile_bits & btrfs_raid_array[i].bg_flag))
			continue;
		/* profile is in use: enforce its minimum device count */
		if (num_devices < btrfs_raid_array[i].devs_min &&
		    btrfs_raid_array[i].mindev_error)
			return btrfs_raid_array[i].mindev_error;
	}

	return 0;
}
/*
 * Scan @fs_devs for a device other than @device that is present (open
 * bdev) and not marked missing.  Returns NULL when no such device
 * exists.
 */
static struct btrfs_device * btrfs_find_next_active_device(
		struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
{
	struct btrfs_device *cur;

	list_for_each_entry(cur, &fs_devs->devices, dev_list) {
		if (cur == device)
			continue;
		if (test_bit(BTRFS_DEV_STATE_MISSING, &cur->dev_state))
			continue;
		if (cur->bdev)
			return cur;
	}

	return NULL;
}
/*
 * Helper function to check if the given device is part of s_bdev / latest_bdev
 * and replace it with the provided or the next active device, in the context
 * where this function called, there should be always be another device (or
 * this_dev) which is active.
 */
void btrfs_assign_next_active_device(struct btrfs_device *device,
				     struct btrfs_device *this_dev)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_device *next_device;

	/* prefer the caller-supplied replacement, else pick any active one */
	if (this_dev)
		next_device = this_dev;
	else
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	if (fs_info->sb->s_bdev &&
	    (fs_info->sb->s_bdev == device->bdev))
		fs_info->sb->s_bdev = next_device->bdev;

	if (fs_info->fs_devices->latest_bdev == device->bdev)
		fs_info->fs_devices->latest_bdev = next_device->bdev;
}
/*
 * Return btrfs_fs_devices::num_devices excluding the device that's being
 * currently replaced.
 */
static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
	u64 count = fs_info->fs_devices->num_devices;

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
		/* the replace target is in the count but not really usable */
		ASSERT(count > 1);
		count--;
	}
	up_read(&fs_info->dev_replace.rwsem);

	return count;
}
/*
 * Remove a device from the filesystem, identified either by @devid (when
 * non-zero) or by @device_path (the string "missing" selects the first
 * absent device).
 *
 * Checks RAID minimum-device constraints, shrinks the device to zero,
 * deletes its dev item, detaches it from the in-memory device lists and
 * wipes its superblocks.  Returns 0 on success or a negative errno /
 * BTRFS_ERROR_DEV_* code.
 */
int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
		u64 devid)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *cur_devices;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 num_devices;
	int ret = 0;

	mutex_lock(&uuid_mutex);

	/* removing one device must keep all RAID profiles above their minimum */
	num_devices = btrfs_num_devices(fs_info);

	ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
	if (ret)
		goto out;

	device = btrfs_find_device_by_devspec(fs_info, devid, device_path);

	if (IS_ERR(device)) {
		if (PTR_ERR(device) == -ENOENT &&
		    strcmp(device_path, "missing") == 0)
			ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
		else
			ret = PTR_ERR(device);
		goto out;
	}

	/* an active swapfile pins this device's extents in place */
	if (btrfs_pinned_by_swapfile(fs_info, device)) {
		btrfs_warn_in_rcu(fs_info,
	  "cannot remove device %s (devid %llu) due to active swapfile",
				  rcu_str_deref(device->name), device->devid);
		ret = -ETXTBSY;
		goto out;
	}

	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		ret = BTRFS_ERROR_DEV_TGT_REPLACE;
		goto out;
	}

	/* refuse to remove the last writable device */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	    fs_info->fs_devices->rw_devices == 1) {
		ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
		goto out;
	}

	/* stop new allocations on the device before shrinking it */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_del_init(&device->dev_alloc_list);
		device->fs_devices->rw_devices--;
		mutex_unlock(&fs_info->chunk_mutex);
	}

	/* shrink may relocate chunks and block; drop uuid_mutex meanwhile */
	mutex_unlock(&uuid_mutex);
	ret = btrfs_shrink_device(device, 0);
	mutex_lock(&uuid_mutex);
	if (ret)
		goto error_undo;

	/*
	 * TODO: the superblock still includes this device in its num_devices
	 * counter although write_all_supers() is not locked out. This
	 * could give a filesystem state which requires a degraded mount.
	 */
	ret = btrfs_rm_dev_item(fs_info, device);
	if (ret)
		goto error_undo;

	clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	btrfs_scrub_cancel_dev(fs_info, device);

	/*
	 * the device list mutex makes sure that we don't change
	 * the device list while someone else is writing out all
	 * the device supers. Whoever is writing all supers, should
	 * lock the device list mutex before getting the number of
	 * devices in the super block (super_copy). Conversely,
	 * whoever updates the number of devices in the super block
	 * (super_copy) should hold the device list mutex.
	 */

	/*
	 * In normal cases the cur_devices == fs_devices. But in case
	 * of deleting a seed device, the cur_devices should point to
	 * its own fs_devices listed under the fs_devices->seed.
	 */
	cur_devices = device->fs_devices;
	mutex_lock(&fs_devices->device_list_mutex);
	list_del_rcu(&device->dev_list);

	cur_devices->num_devices--;
	cur_devices->total_devices--;
	/* Update total_devices of the parent fs_devices if it's seed */
	if (cur_devices != fs_devices)
		fs_devices->total_devices--;

	if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
		cur_devices->missing_devices--;

	/* hand off s_bdev/latest_bdev roles before the device goes away */
	btrfs_assign_next_active_device(device, NULL);

	if (device->bdev) {
		cur_devices->open_devices--;
		/* remove sysfs entry */
		btrfs_sysfs_rm_device_link(fs_devices, device);
	}

	num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
	btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * at this point, the device is zero sized and detached from
	 * the devices list.  All that's left is to zero out the old
	 * supers and free the device.
	 */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		btrfs_scratch_superblocks(device->bdev, device->name->str);

	btrfs_close_bdev(device);
	call_rcu(&device->rcu, free_device_rcu);

	/* last open device of a seed fs_devices: unlink and free the whole set */
	if (cur_devices->open_devices == 0) {
		while (fs_devices) {
			if (fs_devices->seed == cur_devices) {
				fs_devices->seed = cur_devices->seed;
				break;
			}
			fs_devices = fs_devices->seed;
		}
		cur_devices->seed = NULL;
		close_fs_devices(cur_devices);
		free_fs_devices(cur_devices);
	}

out:
	mutex_unlock(&uuid_mutex);
	return ret;

error_undo:
	/* re-enable allocations on the device we failed to remove */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		mutex_lock(&fs_info->chunk_mutex);
		list_add(&device->dev_alloc_list,
			 &fs_devices->alloc_list);
		device->fs_devices->rw_devices++;
		mutex_unlock(&fs_info->chunk_mutex);
	}
	goto out;
}
/*
 * Detach the replace source device @srcdev from its fs_devices lists and
 * adjust the device counters.  Caller must hold device_list_mutex; the
 * device itself is freed later by btrfs_rm_dev_replace_free_srcdev().
 */
void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices;

	lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);

	/*
	 * in case of fs with no seed, srcdev->fs_devices will point
	 * to fs_devices of fs_info. However when the dev being replaced is
	 * a seed dev it will point to the seed's local fs_devices. In short
	 * srcdev will have its correct fs_devices in both the cases.
	 */
	fs_devices = srcdev->fs_devices;

	list_del_rcu(&srcdev->dev_list);
	list_del(&srcdev->dev_alloc_list);
	fs_devices->num_devices--;
	if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
		fs_devices->missing_devices--;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
		fs_devices->rw_devices--;

	if (srcdev->bdev)
		fs_devices->open_devices--;
}
/*
 * Finish tearing down the replace source device: wipe its superblocks
 * (when writable), close its bdev and free it via RCU.  If this was the
 * last device of a seed fs_devices, unlink and free that fs_devices too.
 */
void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
				      struct btrfs_device *srcdev)
{
	struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;

	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) {
		/* zero out the old super if it is writable */
		btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str);
	}

	btrfs_close_bdev(srcdev);
	call_rcu(&srcdev->rcu, free_device_rcu);

	/* if this is no devs we rather delete the fs_devices */
	if (!fs_devices->num_devices) {
		struct btrfs_fs_devices *tmp_fs_devices;

		/*
		 * On a mounted FS, num_devices can't be zero unless it's a
		 * seed. In case of a seed device being replaced, the replace
		 * target added to the sprout FS, so there will be no more
		 * device left under the seed FS.
		 */
		ASSERT(fs_devices->seeding);

		/* unlink fs_devices from the seed chain before freeing it */
		tmp_fs_devices = fs_info->fs_devices;
		while (tmp_fs_devices) {
			if (tmp_fs_devices->seed == fs_devices) {
				tmp_fs_devices->seed = fs_devices->seed;
				break;
			}
			tmp_fs_devices = tmp_fs_devices->seed;
		}
		fs_devices->seed = NULL;
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
	}
}
/*
 * Detach a device-replace target device from the filesystem and release
 * it: remove it from the device list, hand off any s_bdev/latest_bdev
 * role it held, wipe its on-disk superblocks and drop the last
 * reference via RCU.
 */
void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
{
	struct btrfs_fs_devices *fs_devices;

	/*
	 * Validate the pointer BEFORE any dereference.  The previous code
	 * read tgtdev->fs_info in the declaration initializer and only
	 * then did WARN_ON(!tgtdev), so a NULL tgtdev would oops before
	 * the warning could ever fire.
	 */
	if (WARN_ON(!tgtdev))
		return;

	fs_devices = tgtdev->fs_info->fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);

	btrfs_sysfs_rm_device_link(fs_devices, tgtdev);

	if (tgtdev->bdev)
		fs_devices->open_devices--;

	fs_devices->num_devices--;

	/* hand off s_bdev/latest_bdev roles before the device goes away */
	btrfs_assign_next_active_device(tgtdev, NULL);

	list_del_rcu(&tgtdev->dev_list);

	mutex_unlock(&fs_devices->device_list_mutex);

	/*
	 * The update_dev_time() with in btrfs_scratch_superblocks()
	 * may lead to a call to btrfs_show_devname() which will try
	 * to hold device_list_mutex. And here this device
	 * is already out of device list, so we don't have to hold
	 * the device_list_mutex lock.
	 */
	btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str);

	btrfs_close_bdev(tgtdev);
	call_rcu(&tgtdev->rcu, free_device_rcu);
}
/*
 * Look up the btrfs_device registered for the block device at
 * @device_path by reading the superblock off the device and matching its
 * devid and UUIDs against the in-memory device list.
 *
 * Returns the device, ERR_PTR(-ENOENT) when not registered, or an
 * ERR_PTR from opening/reading the block device.
 */
static struct btrfs_device *btrfs_find_device_by_path(
		struct btrfs_fs_info *fs_info, const char *device_path)
{
	int ret = 0;
	struct btrfs_super_block *disk_super;
	u64 devid;
	u8 *dev_uuid;
	struct block_device *bdev;
	struct buffer_head *bh;
	struct btrfs_device *device;

	ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
				    fs_info->bdev_holder, 0, &bdev, &bh);
	if (ret)
		return ERR_PTR(ret);

	disk_super = (struct btrfs_super_block *)bh->b_data;
	devid = btrfs_stack_device_id(&disk_super->dev_item);
	dev_uuid = disk_super->dev_item.uuid;
	/* the fsid to match depends on whether a metadata uuid is in use */
	if (btrfs_fs_incompat(fs_info, METADATA_UUID))
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   disk_super->metadata_uuid, true);
	else
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   disk_super->fsid, true);

	brelse(bh);
	if (!device)
		device = ERR_PTR(-ENOENT);
	blkdev_put(bdev, FMODE_READ);
	return device;
}
/*
 * Lookup a device given by device id, or the path if the id is 0.
 *
 * The path "missing" selects the first device that is recorded in the
 * metadata but has no open block device.  Returns the device or an
 * ERR_PTR (-ENOENT when not found, -EINVAL for an empty path).
 */
struct btrfs_device *btrfs_find_device_by_devspec(
		struct btrfs_fs_info *fs_info, u64 devid,
		const char *device_path)
{
	struct btrfs_device *device;

	if (devid) {
		device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
					   NULL, true);
		if (!device)
			return ERR_PTR(-ENOENT);
		return device;
	}

	if (!device_path || !device_path[0])
		return ERR_PTR(-EINVAL);

	if (strcmp(device_path, "missing") == 0) {
		/* Find first missing device */
		list_for_each_entry(device, &fs_info->fs_devices->devices,
				    dev_list) {
			if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				     &device->dev_state) && !device->bdev)
				return device;
		}
		return ERR_PTR(-ENOENT);
	}

	return btrfs_find_device_by_path(fs_info, device_path);
}
/*
 * does all the dirty work required for changing file system's UUID.
 *
 * Moves all current devices under a freshly allocated seed fs_devices,
 * clears the seeding flag on the (now sprout) fs_devices, generates a
 * new fsid for it and clears the seeding flag in the super block copy.
 * Caller must hold uuid_mutex.  Returns 0 or a negative errno.
 */
static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_fs_devices *old_devices;
	struct btrfs_fs_devices *seed_devices;
	struct btrfs_super_block *disk_super = fs_info->super_copy;
	struct btrfs_device *device;
	u64 super_flags;

	lockdep_assert_held(&uuid_mutex);
	if (!fs_devices->seeding)
		return -EINVAL;

	seed_devices = alloc_fs_devices(NULL, NULL);
	if (IS_ERR(seed_devices))
		return PTR_ERR(seed_devices);

	/* keep a clone of the original fs_devices on the global fs_uuids list */
	old_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(old_devices)) {
		kfree(seed_devices);
		return PTR_ERR(old_devices);
	}

	list_add(&old_devices->fs_list, &fs_uuids);

	/* seed_devices takes over the current device set wholesale */
	memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
	seed_devices->opened = 1;
	INIT_LIST_HEAD(&seed_devices->devices);
	INIT_LIST_HEAD(&seed_devices->alloc_list);
	mutex_init(&seed_devices->device_list_mutex);

	mutex_lock(&fs_devices->device_list_mutex);
	list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
			      synchronize_rcu);
	list_for_each_entry(device, &seed_devices->devices, dev_list)
		device->fs_devices = seed_devices;

	mutex_lock(&fs_info->chunk_mutex);
	list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
	mutex_unlock(&fs_info->chunk_mutex);

	/* the sprout starts out empty, writable, chained to the seed */
	fs_devices->seeding = 0;
	fs_devices->num_devices = 0;
	fs_devices->open_devices = 0;
	fs_devices->missing_devices = 0;
	fs_devices->rotating = 0;
	fs_devices->seed = seed_devices;

	generate_random_uuid(fs_devices->fsid);
	memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
	memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
	mutex_unlock(&fs_devices->device_list_mutex);

	super_flags = btrfs_super_flags(disk_super) &
		      ~BTRFS_SUPER_FLAG_SEEDING;
	btrfs_set_super_flags(disk_super, super_flags);

	return 0;
}
/*
 * Store the expected generation for seed devices in device items.
 *
 * Walks every dev item in the chunk tree; for items belonging to a
 * seeding fs_devices the generation field is updated to the device's
 * in-memory generation.  Returns 0 or a negative errno.
 */
static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
			       struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dev_item *dev_item;
	struct btrfs_device *device;
	struct btrfs_key key;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];
	u64 devid;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* start at the very first dev item */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = BTRFS_DEV_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto error;

		leaf = path->nodes[0];
next_slot:
		/* ran off the leaf: advance, or re-search after a new leaf */
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret > 0)
				break;
			if (ret < 0)
				goto error;
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
			btrfs_release_path(path);
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
		    key.type != BTRFS_DEV_ITEM_KEY)
			break;

		dev_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_dev_item);
		devid = btrfs_device_id(leaf, dev_item);
		read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
				   BTRFS_UUID_SIZE);
		read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
				   BTRFS_FSID_SIZE);
		device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
					   fs_uuid, true);
		BUG_ON(!device); /* Logic error */

		/* only seed devices need their generation pinned on disk */
		if (device->fs_devices->seeding) {
			btrfs_set_device_generation(leaf, dev_item,
						    device->generation);
			btrfs_mark_buffer_dirty(leaf);
		}

		path->slots[0]++;
		goto next_slot;
	}
	ret = 0;
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * Add the block device at @device_path as a new device of the mounted
 * filesystem.  For a seed filesystem this also sprouts: the current
 * devices are moved under a seed fs_devices and a fresh fsid is
 * generated.  Returns 0 on success or a negative errno; on failure the
 * device is fully unwound from lists, counters and the super copy.
 */
int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
{
	struct btrfs_root *root = fs_info->dev_root;
	struct request_queue *q;
	struct btrfs_trans_handle *trans;
	struct btrfs_device *device;
	struct block_device *bdev;
	struct super_block *sb = fs_info->sb;
	struct rcu_string *name;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 orig_super_total_bytes;
	u64 orig_super_num_devices;
	int seeding_dev = 0;
	int ret = 0;
	bool unlocked = false;

	/* a read-only non-seed fs cannot accept new devices */
	if (sb_rdonly(sb) && !fs_devices->seeding)
		return -EROFS;

	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
				  fs_info->bdev_holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* sprouting rewrites the fsid; lock out umount and uuid changes */
	if (fs_devices->seeding) {
		seeding_dev = 1;
		down_write(&sb->s_umount);
		mutex_lock(&uuid_mutex);
	}

	filemap_write_and_wait(bdev->bd_inode->i_mapping);

	/* reject a device that is already part of this filesystem */
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		if (device->bdev == bdev) {
			ret = -EEXIST;
			mutex_unlock(
				&fs_devices->device_list_mutex);
			goto error;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	device = btrfs_alloc_device(fs_info, NULL, NULL);
	if (IS_ERR(device)) {
		/* we can safely leave the fs_devices entry around */
		ret = PTR_ERR(device);
		goto error;
	}

	name = rcu_string_strdup(device_path, GFP_KERNEL);
	if (!name) {
		ret = -ENOMEM;
		goto error_free_device;
	}
	rcu_assign_pointer(device->name, name);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error_free_device;
	}

	/* initialize geometry/state of the new device from the fs and bdev */
	q = bdev_get_queue(bdev);
	set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
	device->generation = trans->transid;
	device->io_width = fs_info->sectorsize;
	device->io_align = fs_info->sectorsize;
	device->sector_size = fs_info->sectorsize;
	device->total_bytes = round_down(i_size_read(bdev->bd_inode),
					 fs_info->sectorsize);
	device->disk_total_bytes = device->total_bytes;
	device->commit_total_bytes = device->total_bytes;
	device->fs_info = fs_info;
	device->bdev = bdev;
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	device->mode = FMODE_EXCL;
	device->dev_stats_valid = 1;
	set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);

	if (seeding_dev) {
		sb->s_flags &= ~SB_RDONLY;
		ret = btrfs_prepare_sprout(fs_info);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_trans;
		}
	}

	/* publish the device on all lists and update global accounting */
	device->fs_devices = fs_devices;

	mutex_lock(&fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_add_rcu(&device->dev_list, &fs_devices->devices);
	list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
	fs_devices->num_devices++;
	fs_devices->open_devices++;
	fs_devices->rw_devices++;
	fs_devices->total_devices++;
	fs_devices->total_rw_bytes += device->total_bytes;

	atomic64_add(device->total_bytes, &fs_info->free_chunk_space);

	if (!blk_queue_nonrot(q))
		fs_devices->rotating = 1;

	/* remember original super values for unwinding on failure */
	orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
	btrfs_set_super_total_bytes(fs_info->super_copy,
		round_down(orig_super_total_bytes + device->total_bytes,
			   fs_info->sectorsize));

	orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices + 1);

	/* add sysfs device entry */
	btrfs_sysfs_add_device_link(fs_devices, device);

	/*
	 * we've got more storage, clear any full flags on the space
	 * infos
	 */
	btrfs_clear_space_info_full(fs_info);

	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (seeding_dev) {
		mutex_lock(&fs_info->chunk_mutex);
		ret = init_first_rw_device(trans, fs_info);
		mutex_unlock(&fs_info->chunk_mutex);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}
	}

	ret = btrfs_add_dev_item(trans, device);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto error_sysfs;
	}

	if (seeding_dev) {
		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];

		ret = btrfs_finish_sprout(trans, fs_info);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto error_sysfs;
		}

		/* Sprouting would change fsid of the mounted root,
		 * so rename the fsid on the sysfs
		 */
		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
						fs_info->fs_devices->fsid);
		if (kobject_rename(&fs_devices->fsid_kobj, fsid_buf))
			btrfs_warn(fs_info,
				   "sysfs: failed to create fsid for sprout");
	}

	ret = btrfs_commit_transaction(trans);

	if (seeding_dev) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
		unlocked = true;

		if (ret) /* transaction commit */
			return ret;

		/* a sprout must not keep its system chunks on the seed device */
		ret = btrfs_relocate_sys_chunks(fs_info);
		if (ret < 0)
			btrfs_handle_fs_error(fs_info, ret,
				    "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
		trans = btrfs_attach_transaction(root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) == -ENOENT)
				return 0;
			ret = PTR_ERR(trans);
			trans = NULL;
			goto error_sysfs;
		}
		ret = btrfs_commit_transaction(trans);
	}

	/* Update ctime/mtime for libblkid */
	update_dev_time(device_path);
	return ret;

error_sysfs:
	/* unwind everything published above, in reverse order */
	btrfs_sysfs_rm_device_link(fs_devices, device);
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_del_rcu(&device->dev_list);
	list_del(&device->dev_alloc_list);
	fs_info->fs_devices->num_devices--;
	fs_info->fs_devices->open_devices--;
	fs_info->fs_devices->rw_devices--;
	fs_info->fs_devices->total_devices--;
	fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
	atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
	btrfs_set_super_total_bytes(fs_info->super_copy,
				    orig_super_total_bytes);
	btrfs_set_super_num_devices(fs_info->super_copy,
				    orig_super_num_devices);
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
error_trans:
	if (seeding_dev)
		sb->s_flags |= SB_RDONLY;
	if (trans)
		btrfs_end_transaction(trans);
error_free_device:
	btrfs_free_device(device);
error:
	blkdev_put(bdev, FMODE_EXCL);
	if (seeding_dev && !unlocked) {
		mutex_unlock(&uuid_mutex);
		up_write(&sb->s_umount);
	}
	return ret;
}
/*
 * Rewrite the existing dev item for @device with its current in-memory
 * geometry and usage.  Returns 0 on success, -ENOENT when the item is
 * missing, or a negative errno from the tree search.
 */
static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
					struct btrfs_device *device)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_root *root = device->fs_info->chunk_root;
	struct btrfs_dev_item *dev_item;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.type = BTRFS_DEV_ITEM_KEY;
	key.offset = device->devid;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		goto out;

	/* the item must already exist; ret > 0 means it does not */
	if (ret > 0) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

	/* copy the live device state into the on-disk item */
	btrfs_set_device_id(leaf, dev_item, device->devid);
	btrfs_set_device_type(leaf, dev_item, device->type);
	btrfs_set_device_io_align(leaf, dev_item, device->io_align);
	btrfs_set_device_io_width(leaf, dev_item, device->io_width);
	btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
	btrfs_set_device_total_bytes(leaf, dev_item,
				     btrfs_device_get_disk_total_bytes(device));
	btrfs_set_device_bytes_used(leaf, dev_item,
				    btrfs_device_get_bytes_used(device));
	btrfs_mark_buffer_dirty(leaf);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Grow @device to @new_size (rounded down to the sector size), updating
 * the in-memory accounting, the super copy's total_bytes and the
 * on-disk dev item.  Returns 0, -EACCES for a non-writable device or
 * -EINVAL when the size does not actually grow or the device is a
 * replace target.
 */
int btrfs_grow_device(struct btrfs_trans_handle *trans,
		      struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_fs_devices *fs_devices;
	u64 old_total;
	u64 diff;

	if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
		return -EACCES;

	new_size = round_down(new_size, fs_info->sectorsize);

	/* chunk_mutex guards the size fields and the resized list */
	mutex_lock(&fs_info->chunk_mutex);
	old_total = btrfs_super_total_bytes(super_copy);
	diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);

	if (new_size <= device->total_bytes ||
	    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EINVAL;
	}

	fs_devices = fs_info->fs_devices;

	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total + diff, fs_info->sectorsize));
	device->fs_devices->total_rw_bytes += diff;

	btrfs_device_set_total_bytes(device, new_size);
	btrfs_device_set_disk_total_bytes(device, new_size);
	btrfs_clear_space_info_full(device->fs_info);
	/* queue the device so the resize is committed with the transaction */
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &fs_devices->resized_devices);
	mutex_unlock(&fs_info->chunk_mutex);

	return btrfs_update_device(trans, device);
}
/*
 * Delete the chunk item for @chunk_offset from the chunk tree.  A
 * missing item is treated as corruption (-ENOENT) since the caller only
 * removes chunks it has just looked up.
 */
static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *root = fs_info->chunk_root;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = chunk_offset;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	else if (ret > 0) { /* Logic error or corruption */
		btrfs_handle_fs_error(fs_info, -ENOENT,
				      "Failed lookup while freeing chunk.");
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
	if (ret < 0)
		btrfs_handle_fs_error(fs_info, ret,
				      "Failed to delete chunk item.");
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Remove the system chunk entry for @chunk_offset from the in-memory
 * copy of the superblock's sys_chunk_array.
 *
 * The array is a packed sequence of (btrfs_disk_key, btrfs_chunk)
 * pairs; entries following the removed one are shifted down and the
 * array size in the super copy is updated.  Returns 0 on success or
 * -EIO when the array contents are malformed.
 */
static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *ptr;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur;
	struct btrfs_key key;

	/* chunk_mutex protects sys_chunk_array and sys_array_size */
	mutex_lock(&fs_info->chunk_mutex);
	array_size = btrfs_super_sys_array_size(super_copy);

	ptr = super_copy->sys_chunk_array;
	cur = 0;

	while (cur < array_size) {
		/*
		 * Make sure a full key fits before reading it; a truncated
		 * trailing entry means the array is corrupted.  (Mount-time
		 * validation should reject such supers, but don't rely on
		 * it here.)
		 */
		if (cur + sizeof(*disk_key) > array_size) {
			ret = -EIO;
			break;
		}
		disk_key = (struct btrfs_disk_key *)ptr;
		btrfs_disk_key_to_cpu(&key, disk_key);

		len = sizeof(*disk_key);

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			chunk = (struct btrfs_chunk *)(ptr + len);
			num_stripes = btrfs_stack_chunk_num_stripes(chunk);
			len += btrfs_chunk_item_size(num_stripes);
		} else {
			ret = -EIO;
			break;
		}

		/*
		 * Bound the whole entry as well: an oversized num_stripes
		 * from a corrupted super block would otherwise make the
		 * memmove length below, array_size - (cur + len), wrap
		 * around (u32 underflow) and write far out of bounds.
		 */
		if (cur + len > array_size) {
			ret = -EIO;
			break;
		}

		if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
		    key.offset == chunk_offset) {
			memmove(ptr, ptr + len, array_size - (cur + len));
			array_size -= len;
			btrfs_set_super_sys_array_size(super_copy, array_size);
		} else {
			ptr += len;
			cur += len;
		}
	}
	mutex_unlock(&fs_info->chunk_mutex);
	return ret;
}
/*
 * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
 * @logical: Logical block offset in bytes.
 * @length: Length of extent in bytes.
 *
 * Looks the range up in the chunk mapping tree and sanity-checks that
 * the returned mapping actually covers @logical.
 *
 * Return: Chunk mapping or ERR_PTR(-EINVAL) when no (valid) mapping
 * exists.  The caller must drop the returned reference with
 * free_extent_map().
 */
struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
				       u64 logical, u64 length)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;

	em_tree = &fs_info->mapping_tree.map_tree;
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, logical, length);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_crit(fs_info, "unable to find logical %llu length %llu",
			   logical, length);
		return ERR_PTR(-EINVAL);
	}

	/* the found mapping must contain the requested logical offset */
	if (em->start > logical || em->start + em->len < logical) {
		btrfs_crit(fs_info,
			   "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
			   logical, length, em->start, em->start + em->len);
		free_extent_map(em);
		return ERR_PTR(-EINVAL);
	}

	/* callers are responsible for dropping em's ref. */
	return em;
}
/*
 * Remove the (already fully relocated) chunk at @chunk_offset: free
 * every stripe's dev extent, update the affected devices, delete the
 * chunk item (and its sys_chunk_array entry for system chunks) and
 * remove the block group.  Returns 0 or a negative errno; failures
 * abort the transaction.
 */
int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_map *em;
	struct map_lookup *map;
	u64 dev_extent_len = 0;
	int i, ret = 0;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em)) {
		/*
		 * This is a logic error, but we don't want to just rely on the
		 * user having built with ASSERT enabled, so if ASSERT doesn't
		 * do anything we still error out.
		 */
		ASSERT(0);
		return PTR_ERR(em);
	}
	map = em->map_lookup;
	/* ensure system chunk space before we start modifying the trees */
	mutex_lock(&fs_info->chunk_mutex);
	check_system_chunk(trans, map->type);
	mutex_unlock(&fs_info->chunk_mutex);

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		struct btrfs_device *device = map->stripes[i].dev;
		ret = btrfs_free_dev_extent(trans, device,
					    map->stripes[i].physical,
					    &dev_extent_len);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		/* return the freed space to the device's accounting */
		if (device->bytes_used > 0) {
			mutex_lock(&fs_info->chunk_mutex);
			btrfs_device_set_bytes_used(device,
					device->bytes_used - dev_extent_len);
			atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
			btrfs_clear_space_info_full(fs_info);
			mutex_unlock(&fs_info->chunk_mutex);
		}

		ret = btrfs_update_device(trans, device);
		if (ret) {
			mutex_unlock(&fs_devices->device_list_mutex);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}
	mutex_unlock(&fs_devices->device_list_mutex);

	ret = btrfs_free_chunk(trans, chunk_offset);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);

	/* system chunks also live in the super's sys_chunk_array */
	if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_remove_block_group(trans, chunk_offset, em);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

out:
	/* once for us */
	free_extent_map(em);
	return ret;
}
/*
 * Relocate all extents in the chunk at @chunk_offset and then delete the
 * chunk itself.  Note that any non-zero result from btrfs_can_relocate() is
 * reported to the caller as -ENOSPC.
 *
 * Caller must hold fs_info->delete_unused_bgs_mutex (asserted below).
 */
static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_trans_handle *trans;
	int ret;

	/*
	 * Prevent races with automatic removal of unused block groups.
	 * After we relocate and before we remove the chunk with offset
	 * chunk_offset, automatic removal of the block group can kick in,
	 * resulting in a failure when calling btrfs_remove_chunk() below.
	 *
	 * Make sure to acquire this mutex before doing a tree search (dev
	 * or chunk trees) to find chunks. Otherwise the cleaner kthread might
	 * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
	 * we release the path used to search the chunk/dev tree and before
	 * the current task acquires this mutex and calls us.
	 */
	lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);

	ret = btrfs_can_relocate(fs_info, chunk_offset);
	if (ret)
		return -ENOSPC;

	/* step one, relocate all the extents inside this chunk */
	btrfs_scrub_pause(fs_info);
	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
	btrfs_scrub_continue(fs_info);
	if (ret)
		return ret;

	/*
	 * We add the kobjects here (and after forcing data chunk creation)
	 * since relocation is the only place we'll create chunks of a new
	 * type at runtime. The only place where we'll remove the last
	 * chunk of a type is the call immediately below this one. Even
	 * so, we're protected against races with the cleaner thread since
	 * we're covered by the delete_unused_bgs_mutex.
	 */
	btrfs_add_raid_kobjects(fs_info);

	trans = btrfs_start_trans_remove_block_group(root->fs_info,
						     chunk_offset);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		btrfs_handle_fs_error(root->fs_info, ret, NULL);
		return ret;
	}

	/*
	 * step two, delete the device extents and the
	 * chunk tree entries
	 */
	ret = btrfs_remove_chunk(trans, chunk_offset);
	btrfs_end_transaction(trans);
	return ret;
}
/*
 * Relocate every SYSTEM chunk by walking the chunk tree backwards from the
 * highest offset.  Chunks failing with -ENOSPC are counted and the whole
 * scan is retried once; if failures persist after the retry, -ENOSPC is
 * returned (with a WARN).
 */
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_chunk *chunk;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 chunk_type;
	bool retried = false;
	int failed = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

again:
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		/*
		 * Taken before each tree search to keep the cleaner thread
		 * from removing block groups underneath us (see the comment
		 * in btrfs_relocate_chunk()).
		 */
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}
		BUG_ON(ret == 0); /* Corruption */

		/* step back to the chunk item preceding the search key */
		ret = btrfs_previous_item(chunk_root, path, key.objectid,
					  key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto error;
		if (ret > 0)
			break;	/* no more chunk items: done */

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		chunk = btrfs_item_ptr(leaf, path->slots[0],
				       struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);
		/* drop the path before relocation, which runs transactions */
		btrfs_release_path(path);

		if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
			ret = btrfs_relocate_chunk(fs_info, found_key.offset);
			if (ret == -ENOSPC)
				failed++;
			else
				BUG_ON(ret);
		}
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);

		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	ret = 0;
	if (failed && !retried) {
		/* one full retry pass for chunks that hit -ENOSPC */
		failed = 0;
		retried = true;
		goto again;
	} else if (WARN_ON(failed && retried)) {
		ret = -ENOSPC;
	}
error:
	btrfs_free_path(path);
	return ret;
}
/*
 * If the chunk at @chunk_offset is a data chunk and the data space info
 * shows zero bytes used (it may be the only data chunk), force-allocate a
 * fresh empty data chunk in advance so relocating @chunk_offset cannot lose
 * the data raid profile.
 *
 * return 1 : allocate a data chunk successfully,
 * return <0: errors during allocating a data chunk,
 * return 0 : no need to allocate a data chunk.
 */
static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
				      u64 chunk_offset)
{
	struct btrfs_block_group_cache *cache;
	u64 bytes_used;
	u64 chunk_type;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	ASSERT(cache);
	chunk_type = cache->flags;
	btrfs_put_block_group(cache);

	if (chunk_type & BTRFS_BLOCK_GROUP_DATA) {
		spin_lock(&fs_info->data_sinfo->lock);
		bytes_used = fs_info->data_sinfo->bytes_used;
		spin_unlock(&fs_info->data_sinfo->lock);

		if (!bytes_used) {
			struct btrfs_trans_handle *trans;
			int ret;

			trans = btrfs_join_transaction(fs_info->tree_root);
			if (IS_ERR(trans))
				return PTR_ERR(trans);

			ret = btrfs_force_chunk_alloc(trans,
						      BTRFS_BLOCK_GROUP_DATA);
			btrfs_end_transaction(trans);
			if (ret < 0)
				return ret;

			/* a new chunk type may exist now, add its kobjects */
			btrfs_add_raid_kobjects(fs_info);
			return 1;
		}
	}
	return 0;
}
/*
 * Persist the balance state: serialize @bctl into the single balance item
 * (BTRFS_BALANCE_OBJECTID / BTRFS_TEMPORARY_ITEM_KEY / offset 0) in the
 * tree root and commit the transaction.
 *
 * Returns 0 or a negative errno; -EEXIST indicates a balance item already
 * exists (callers use this to distinguish fresh start from resume).
 */
static int insert_balance_item(struct btrfs_fs_info *fs_info,
			       struct btrfs_balance_control *bctl)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*item));
	if (ret)
		goto out;

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));

	/* write the three per-type argument sets into the item */
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
	btrfs_set_balance_data(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
	btrfs_set_balance_meta(leaf, item, &disk_bargs);
	btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
	btrfs_set_balance_sys(leaf, item, &disk_bargs);

	btrfs_set_balance_flags(leaf, item, bctl->flags);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	/* commit even on failure; report the first error encountered */
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}
/*
 * Delete the on-disk balance item from the tree root and commit.  Returns 0,
 * -ENOENT if no balance item exists, or another negative errno.
 */
static int del_balance_item(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret, err;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		/* item not found */
		ret = -ENOENT;
		goto out;
	}

	ret = btrfs_del_item(trans, root, path);
out:
	btrfs_free_path(path);
	/* commit even on failure; report the first error encountered */
	err = btrfs_commit_transaction(trans);
	if (err && !ret)
		ret = err;
	return ret;
}
/*
* This is a heuristic used to reduce the number of chunks balanced on
* resume after balance was interrupted.
*/
static void update_balance_args(struct btrfs_balance_control *bctl)
{
/*
* Turn on soft mode for chunk types that were being converted.
*/
if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
/*
* Turn on usage filter if is not already used. The idea is
* that chunks that we have already balanced should be
* reasonably full. Don't do it for chunks that are being
* converted - that will keep us from relocating unconverted
* (albeit full) chunks.
*/
if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
!(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
bctl->data.usage = 90;
}
if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
!(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
bctl->sys.usage = 90;
}
if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
!(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
bctl->meta.usage = 90;
}
}
/*
 * Clear the balance status in fs_info and delete the balance item from disk.
 * Any failure deleting the item is escalated via btrfs_handle_fs_error().
 */
static void reset_balance_state(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	int ret;

	BUG_ON(!fs_info->balance_ctl);

	/* detach the control structure under the lock, then free it */
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = NULL;
	spin_unlock(&fs_info->balance_lock);

	kfree(bctl);
	ret = del_balance_item(fs_info);
	if (ret)
		btrfs_handle_fs_error(fs_info, ret, NULL);
}
/*
 * Balance filters. Return 1 if chunk should be filtered out
 * (should not be balanced).
 */
static int chunk_profiles_filter(u64 chunk_type,
				 struct btrfs_balance_args *bargs)
{
	u64 extended;

	/* reduce the type to its extended profile bits before matching */
	extended = chunk_to_extended(chunk_type) & BTRFS_EXTENDED_PROFILE_MASK;

	return (bargs->profiles & extended) ? 0 : 1;
}
/*
 * Usage range filter: keep the chunk (return 0) only when
 * usage_min% <= used bytes < usage_max% of the block group size.
 */
static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
				    struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 lo;
	u64 hi;
	int filtered;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	used = btrfs_block_group_used(&cache->item);

	/* lower bound: usage_min percent of the block group size, 0 if unset */
	lo = bargs->usage_min ?
		div_factor_fine(cache->key.offset, bargs->usage_min) : 0;

	/* upper bound: unset means "only completely empty chunks" */
	if (bargs->usage_max == 0)
		hi = 1;
	else if (bargs->usage_max > 100)
		hi = cache->key.offset;
	else
		hi = div_factor_fine(cache->key.offset, bargs->usage_max);

	filtered = !(lo <= used && used < hi);

	btrfs_put_block_group(cache);
	return filtered;
}
/*
 * Single-value usage filter: keep the chunk (return 0) when its used bytes
 * are below the threshold derived from bargs->usage.
 */
static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
			      u64 chunk_offset, struct btrfs_balance_args *bargs)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 thresh;
	int filtered;

	cache = btrfs_lookup_block_group(fs_info, chunk_offset);
	used = btrfs_block_group_used(&cache->item);

	/*
	 * NOTE(review): the unset check reads usage_min while the thresholds
	 * read usage -- preserved as-is; presumably these share storage in
	 * the args struct, confirm against the struct definition.
	 */
	if (bargs->usage_min == 0)
		thresh = 1;		/* unset: only empty chunks pass */
	else if (bargs->usage > 100)
		thresh = cache->key.offset;
	else
		thresh = div_factor_fine(cache->key.offset, bargs->usage);

	filtered = (used >= thresh);

	btrfs_put_block_group(cache);
	return filtered;
}
/*
 * Devid filter: keep the chunk (return 0) when any of its stripes lives on
 * device bargs->devid.
 */
static int chunk_devid_filter(struct extent_buffer *leaf,
			      struct btrfs_chunk *chunk,
			      struct btrfs_balance_args *bargs)
{
	int nr_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	int idx;

	for (idx = 0; idx < nr_stripes; idx++) {
		struct btrfs_stripe *s = btrfs_stripe_nr(chunk, idx);

		if (btrfs_stripe_devid(leaf, s) == bargs->devid)
			return 0;
	}

	return 1;
}
/*
 * [pstart, pend) -- physical device byte range filter.
 *
 * Keep the chunk (return 0) when any stripe of @chunk on device
 * bargs->devid overlaps [pstart, pend).  Only meaningful together with the
 * devid filter; a no-op (always 0) otherwise.
 */
static int chunk_drange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       struct btrfs_balance_args *bargs)
{
	struct btrfs_stripe *stripe;
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	u64 stripe_offset;
	u64 stripe_length;
	int factor;
	int i;

	if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
		return 0;

	/*
	 * factor is the divisor converting the logical chunk length into the
	 * length of a single device stripe, depending on the RAID type.
	 */
	if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
	     BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
		factor = num_stripes / 2;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
		factor = num_stripes - 1;
	} else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
		factor = num_stripes - 2;
	} else {
		factor = num_stripes;
	}

	for (i = 0; i < num_stripes; i++) {
		stripe = btrfs_stripe_nr(chunk, i);
		if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
			continue;

		stripe_offset = btrfs_stripe_offset(leaf, stripe);
		stripe_length = btrfs_chunk_length(leaf, chunk);
		stripe_length = div_u64(stripe_length, factor);

		/* keep the chunk if this stripe intersects the range */
		if (stripe_offset < bargs->pend &&
		    stripe_offset + stripe_length > bargs->pstart)
			return 0;
	}

	return 1;
}
/*
 * [vstart, vend) -- logical (virtual) byte range filter.  Keep the chunk
 * (return 0) when it overlaps the range at all.
 */
static int chunk_vrange_filter(struct extent_buffer *leaf,
			       struct btrfs_chunk *chunk,
			       u64 chunk_offset,
			       struct btrfs_balance_args *bargs)
{
	u64 chunk_end = chunk_offset + btrfs_chunk_length(leaf, chunk);

	/* filter out chunks entirely outside [vstart, vend) */
	if (chunk_offset >= bargs->vend || chunk_end <= bargs->vstart)
		return 1;

	/* at least part of the chunk is inside this vrange */
	return 0;
}
/*
 * Stripe-count filter: keep the chunk (return 0) only when its stripe count
 * falls within [stripes_min, stripes_max].
 */
static int chunk_stripes_range_filter(struct extent_buffer *leaf,
				      struct btrfs_chunk *chunk,
				      struct btrfs_balance_args *bargs)
{
	int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);

	if (num_stripes < bargs->stripes_min ||
	    num_stripes > bargs->stripes_max)
		return 1;

	return 0;
}
/*
 * Soft-convert filter: when a conversion target is set, filter out (1)
 * chunks that already carry the target profile so they are not relocated.
 */
static int chunk_soft_convert_filter(u64 chunk_type,
				     struct btrfs_balance_args *bargs)
{
	u64 extended;

	/* without a convert target there is nothing to skip */
	if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	extended = chunk_to_extended(chunk_type) & BTRFS_EXTENDED_PROFILE_MASK;

	return bargs->target == extended;
}
/*
 * Run all configured balance filters against one chunk.  Returns 1 when the
 * chunk should be relocated, 0 when some filter excludes it.  The limit
 * filters mutate bargs (they count down the remaining budget) and therefore
 * must stay last.
 */
static int should_balance_chunk(struct btrfs_fs_info *fs_info,
				struct extent_buffer *leaf,
				struct btrfs_chunk *chunk, u64 chunk_offset)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_balance_args *bargs = NULL;
	u64 chunk_type = btrfs_chunk_type(leaf, chunk);

	/* type filter */
	if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
	      (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
		return 0;
	}

	/* the type filter above guarantees one of these branches matches */
	if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
		bargs = &bctl->data;
	else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
		bargs = &bctl->sys;
	else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
		bargs = &bctl->meta;

	/* profiles filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
	    chunk_profiles_filter(chunk_type, bargs)) {
		return 0;
	}

	/* usage filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
	    chunk_usage_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
		   chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
		return 0;
	}

	/* devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
	    chunk_devid_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* drange filter, makes sense only with devid filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
	    chunk_drange_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/* vrange filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
	    chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
		return 0;
	}

	/* stripes filter */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
	    chunk_stripes_range_filter(leaf, chunk, bargs)) {
		return 0;
	}

	/*
	 * limited by count, must be the last filter
	 */
	if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
		if (bargs->limit == 0)
			return 0;
		else
			bargs->limit--;
	} else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
		/*
		 * Same logic as the 'limit' filter; the minimum cannot be
		 * determined here because we do not have the global information
		 * about the count of all chunks that satisfy the filters.
		 */
		if (bargs->limit_max == 0)
			return 0;
		else
			bargs->limit_max--;
	}

	return 1;
}
/*
 * Main balance loop.  Makes two passes over the chunk tree, walking from the
 * highest chunk offset backwards: a counting pass that tallies how many
 * chunks each type is expected to move (feeding the limit_min filter), then
 * the real pass that relocates matching chunks.  Returns 0 on success,
 * -ECANCELED when paused/cancelled, or another negative errno.
 */
static int __btrfs_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	u64 chunk_type;
	struct btrfs_chunk *chunk;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	int slot;
	int ret;
	int enospc_errors = 0;
	bool counting = true;
	/* The single value limit and min/max limits use the same bytes in the */
	u64 limit_data = bctl->data.limit;
	u64 limit_meta = bctl->meta.limit;
	u64 limit_sys = bctl->sys.limit;
	u32 count_data = 0;
	u32 count_meta = 0;
	u32 count_sys = 0;
	int chunk_reserved = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	/* zero out stat counters */
	spin_lock(&fs_info->balance_lock);
	memset(&bctl->stat, 0, sizeof(bctl->stat));
	spin_unlock(&fs_info->balance_lock);
again:
	if (!counting) {
		/*
		 * The single value limit and min/max limits use the same bytes
		 * in the
		 */
		bctl->data.limit = limit_data;
		bctl->meta.limit = limit_meta;
		bctl->sys.limit = limit_sys;
	}
	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_CHUNK_ITEM_KEY;

	while (1) {
		/* honor pause (only in the real pass) and cancel requests */
		if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
		    atomic_read(&fs_info->balance_cancel_req)) {
			ret = -ECANCELED;
			goto error;
		}

		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto error;
		}

		/*
		 * this shouldn't happen, it means the last relocate
		 * failed
		 */
		if (ret == 0)
			BUG(); /* FIXME break ? */

		ret = btrfs_previous_item(chunk_root, path, 0,
					  BTRFS_CHUNK_ITEM_KEY);
		if (ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			ret = 0;
			break;
		}

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			break;
		}

		chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
		chunk_type = btrfs_chunk_type(leaf, chunk);

		if (!counting) {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.considered++;
			spin_unlock(&fs_info->balance_lock);
		}

		ret = should_balance_chunk(fs_info, leaf, chunk,
					   found_key.offset);

		btrfs_release_path(path);
		if (!ret) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (counting) {
			/* counting pass: only tally per-type totals */
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			spin_lock(&fs_info->balance_lock);
			bctl->stat.expected++;
			spin_unlock(&fs_info->balance_lock);

			if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
				count_data++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
				count_sys++;
			else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
				count_meta++;

			goto loop;
		}

		/*
		 * Apply limit_min filter, no need to check if the LIMITS
		 * filter is used, limit_min is 0 by default
		 */
		if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
					count_data < bctl->data.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
					count_meta < bctl->meta.limit_min)
				|| ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
					count_sys < bctl->sys.limit_min)) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto loop;
		}

		if (!chunk_reserved) {
			/*
			 * We may be relocating the only data chunk we have,
			 * which could potentially end up with losing data's
			 * raid profile, so lets allocate an empty one in
			 * advance.
			 */
			ret = btrfs_may_alloc_data_chunk(fs_info,
							 found_key.offset);
			if (ret < 0) {
				mutex_unlock(&fs_info->delete_unused_bgs_mutex);
				goto error;
			} else if (ret == 1) {
				chunk_reserved = 1;
			}
		}

		ret = btrfs_relocate_chunk(fs_info, found_key.offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			enospc_errors++;
		} else if (ret == -ETXTBSY) {
			/* block group pinned by a swapfile: skip, not fatal */
			btrfs_info(fs_info,
				   "skipping relocation of block group %llu due to active swapfile",
				   found_key.offset);
			ret = 0;
		} else if (ret) {
			goto error;
		} else {
			spin_lock(&fs_info->balance_lock);
			bctl->stat.completed++;
			spin_unlock(&fs_info->balance_lock);
		}
loop:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}

	if (counting) {
		/* counting pass finished, start the real pass */
		btrfs_release_path(path);
		counting = false;
		goto again;
	}
error:
	btrfs_free_path(path);
	if (enospc_errors) {
		btrfs_info(fs_info, "%d enospc errors during balance",
			   enospc_errors);
		if (!ret)
			ret = -ENOSPC;
	}

	return ret;
}
/**
 * alloc_profile_is_valid - see if a given profile is valid and reduced
 * @flags: profile to validate
 * @extended: if true @flags is treated as an extended profile
 */
static int alloc_profile_is_valid(u64 flags, int extended)
{
	u64 mask = extended ? BTRFS_EXTENDED_PROFILE_MASK :
			      BTRFS_BLOCK_GROUP_PROFILE_MASK;
	u64 profile = flags & ~BTRFS_BLOCK_GROUP_TYPE_MASK;

	/* 1) no bits outside the profile mask may be set */
	if (profile & ~mask)
		return 0;

	/* 2) "0" is valid for usual (non-extended) profiles only */
	if (profile == 0)
		return !extended;

	/* 3) reduced means exactly one profile bit set */
	return is_power_of_2(profile);
}
static inline int balance_need_close(struct btrfs_fs_info *fs_info)
{
	/* cancel was requested */
	if (atomic_read(&fs_info->balance_cancel_req))
		return 1;

	/* normal exit path: neither pause nor cancel is pending */
	return atomic_read(&fs_info->balance_pause_req) == 0 &&
	       atomic_read(&fs_info->balance_cancel_req) == 0;
}
/* Non-zero return value signifies invalidity */
static inline int validate_convert_profile(struct btrfs_balance_args *bctl_arg,
					   u64 allowed)
{
	/* nothing to validate when no conversion target is set */
	if (!(bctl_arg->flags & BTRFS_BALANCE_ARGS_CONVERT))
		return 0;

	/* target must be a valid, reduced extended profile */
	if (!alloc_profile_is_valid(bctl_arg->target, 1))
		return 1;

	/* target must be within the allowed profile set */
	return (bctl_arg->target & ~allowed) ? 1 : 0;
}
/*
 * Fill @buf with textual description of balance filter flags @bargs, up to
 * @size_buf including the terminating null. The output may be trimmed if it
 * does not fit into the provided buffer.
 */
static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
				  u32 size_buf)
{
	int ret;
	u32 size_bp = size_buf;
	char *bp = buf;
	u64 flags = bargs->flags;
	char tmp_buf[128] = {'\0'};

	if (!flags)
		return;

/* append helpers: bail to out_overflow when the buffer would overflow */
#define CHECK_APPEND_NOARG(a)						\
	do {								\
		ret = snprintf(bp, size_bp, (a));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

#define CHECK_APPEND_2ARG(a, v1, v2)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1), (v2));		\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (flags & BTRFS_BALANCE_ARGS_CONVERT) {
		int index = btrfs_bg_flags_to_raid_index(bargs->target);

		CHECK_APPEND_1ARG("convert=%s,", get_raid_name(index));
	}

	if (flags & BTRFS_BALANCE_ARGS_SOFT)
		CHECK_APPEND_NOARG("soft,");

	if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
		btrfs_describe_block_groups(bargs->profiles, tmp_buf,
					    sizeof(tmp_buf));
		CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
	}

	if (flags & BTRFS_BALANCE_ARGS_USAGE)
		CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);

	if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
		CHECK_APPEND_2ARG("usage=%u..%u,",
				  bargs->usage_min, bargs->usage_max);

	if (flags & BTRFS_BALANCE_ARGS_DEVID)
		CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);

	if (flags & BTRFS_BALANCE_ARGS_DRANGE)
		CHECK_APPEND_2ARG("drange=%llu..%llu,",
				  bargs->pstart, bargs->pend);

	if (flags & BTRFS_BALANCE_ARGS_VRANGE)
		CHECK_APPEND_2ARG("vrange=%llu..%llu,",
				  bargs->vstart, bargs->vend);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT)
		CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);

	if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
		CHECK_APPEND_2ARG("limit=%u..%u,",
				  bargs->limit_min, bargs->limit_max);

	if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
		CHECK_APPEND_2ARG("stripes=%u..%u,",
				  bargs->stripes_min, bargs->stripes_max);

#undef CHECK_APPEND_2ARG
#undef CHECK_APPEND_1ARG
#undef CHECK_APPEND_NOARG

out_overflow:
	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
	else
		buf[0] = '\0';
}
/*
 * Log a one-line reconstruction of the balance options ("-f", "-d...",
 * "-m...", "-s...") when a balance starts or resumes.  Silently does
 * nothing if the log buffer cannot be allocated.
 */
static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
{
	u32 size_buf = 1024;
	char tmp_buf[192] = {'\0'};
	char *buf;
	char *bp;
	u32 size_bp = size_buf;
	int ret;
	struct btrfs_balance_control *bctl = fs_info->balance_ctl;

	buf = kzalloc(size_buf, GFP_KERNEL);
	if (!buf)
		return;

	bp = buf;

/* append helper: bail to out_overflow when the buffer would overflow */
#define CHECK_APPEND_1ARG(a, v1)					\
	do {								\
		ret = snprintf(bp, size_bp, (a), (v1));			\
		if (ret < 0 || ret >= size_bp)				\
			goto out_overflow;				\
		size_bp -= ret;						\
		bp += ret;						\
	} while (0)

	if (bctl->flags & BTRFS_BALANCE_FORCE)
		CHECK_APPEND_1ARG("%s", "-f ");

	if (bctl->flags & BTRFS_BALANCE_DATA) {
		describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-d%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_METADATA) {
		describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-m%s ", tmp_buf);
	}

	if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
		describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
		CHECK_APPEND_1ARG("-s%s ", tmp_buf);
	}

#undef CHECK_APPEND_1ARG

out_overflow:
	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
	btrfs_info(fs_info, "balance: %s %s",
		   (bctl->flags & BTRFS_BALANCE_RESUME) ?
		   "resume" : "start", buf);

	kfree(buf);
}
/*
 * Should be called with balance mutex held.
 *
 * Validates @bctl (mixed-group rules, convert target profiles, redundancy
 * reduction), persists the balance item, then runs __btrfs_balance() with
 * the balance mutex temporarily dropped.  The final status is reported
 * through @bargs when it is non-NULL.  On the early error path @bctl is
 * consumed: it is freed, or the whole balance state is reset when resuming.
 */
int btrfs_balance(struct btrfs_fs_info *fs_info,
		  struct btrfs_balance_control *bctl,
		  struct btrfs_ioctl_balance_args *bargs)
{
	u64 meta_target, data_target;
	u64 allowed;
	int mixed = 0;
	int ret;
	u64 num_devices;
	unsigned seq;
	bool reducing_integrity;

	if (btrfs_fs_closing(fs_info) ||
	    atomic_read(&fs_info->balance_pause_req) ||
	    atomic_read(&fs_info->balance_cancel_req)) {
		ret = -EINVAL;
		goto out;
	}

	allowed = btrfs_super_incompat_flags(fs_info->super_copy);
	if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = 1;

	/*
	 * In case of mixed groups both data and meta should be picked,
	 * and identical options should be given for both of them.
	 */
	allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
	if (mixed && (bctl->flags & allowed)) {
		if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
		    !(bctl->flags & BTRFS_BALANCE_METADATA) ||
		    memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
			btrfs_err(fs_info,
	"balance: mixed groups data and metadata options must be the same");
			ret = -EINVAL;
			goto out;
		}
	}

	num_devices = btrfs_num_devices(fs_info);

	/* build the set of profiles allowed for this device count */
	allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP;
	if (num_devices > 1)
		allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
	if (num_devices > 2)
		allowed |= BTRFS_BLOCK_GROUP_RAID5;
	if (num_devices > 3)
		allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
			    BTRFS_BLOCK_GROUP_RAID6);
	if (validate_convert_profile(&bctl->data, allowed)) {
		int index = btrfs_bg_flags_to_raid_index(bctl->data.target);

		btrfs_err(fs_info,
			  "balance: invalid convert data profile %s",
			  get_raid_name(index));
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->meta, allowed)) {
		int index = btrfs_bg_flags_to_raid_index(bctl->meta.target);

		btrfs_err(fs_info,
			  "balance: invalid convert metadata profile %s",
			  get_raid_name(index));
		ret = -EINVAL;
		goto out;
	}
	if (validate_convert_profile(&bctl->sys, allowed)) {
		int index = btrfs_bg_flags_to_raid_index(bctl->sys.target);

		btrfs_err(fs_info,
			  "balance: invalid convert system profile %s",
			  get_raid_name(index));
		ret = -EINVAL;
		goto out;
	}

	/* allow to reduce meta or sys integrity only if force set */
	allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
			BTRFS_BLOCK_GROUP_RAID10 |
			BTRFS_BLOCK_GROUP_RAID5 |
			BTRFS_BLOCK_GROUP_RAID6;
	/* seqlock loop: retry if the avail_*_alloc_bits changed meanwhile */
	do {
		seq = read_seqbegin(&fs_info->profiles_lock);

		if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_system_alloc_bits & allowed) &&
		     !(bctl->sys.target & allowed)) ||
		    ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
		     (fs_info->avail_metadata_alloc_bits & allowed) &&
		     !(bctl->meta.target & allowed)))
			reducing_integrity = true;
		else
			reducing_integrity = false;

		/* if we're not converting, the target field is uninitialized */
		meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->meta.target : fs_info->avail_metadata_alloc_bits;
		data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
			bctl->data.target : fs_info->avail_data_alloc_bits;
	} while (read_seqretry(&fs_info->profiles_lock, seq));

	if (reducing_integrity) {
		if (bctl->flags & BTRFS_BALANCE_FORCE) {
			btrfs_info(fs_info,
				   "balance: force reducing metadata integrity");
		} else {
			btrfs_err(fs_info,
	"balance: reduces metadata integrity, use --force if you want this");
			ret = -EINVAL;
			goto out;
		}
	}

	if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
		btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
		int meta_index = btrfs_bg_flags_to_raid_index(meta_target);
		int data_index = btrfs_bg_flags_to_raid_index(data_target);

		btrfs_warn(fs_info,
	"balance: metadata profile %s has lower redundancy than data profile %s",
			   get_raid_name(meta_index), get_raid_name(data_index));
	}

	ret = insert_balance_item(fs_info, bctl);
	if (ret && ret != -EEXIST)
		goto out;

	if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
		/* fresh balance: the item must not have existed */
		BUG_ON(ret == -EEXIST);
		BUG_ON(fs_info->balance_ctl);
		spin_lock(&fs_info->balance_lock);
		fs_info->balance_ctl = bctl;
		spin_unlock(&fs_info->balance_lock);
	} else {
		/* resume: the item must already exist */
		BUG_ON(ret != -EEXIST);
		spin_lock(&fs_info->balance_lock);
		update_balance_args(bctl);
		spin_unlock(&fs_info->balance_lock);
	}

	ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
	describe_balance_start_or_resume(fs_info);
	/* drop the mutex while the long-running loop executes */
	mutex_unlock(&fs_info->balance_mutex);

	ret = __btrfs_balance(fs_info);

	mutex_lock(&fs_info->balance_mutex);
	if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
		btrfs_info(fs_info, "balance: paused");
	else if (ret == -ECANCELED && atomic_read(&fs_info->balance_cancel_req))
		btrfs_info(fs_info, "balance: canceled");
	else
		btrfs_info(fs_info, "balance: ended with status: %d", ret);

	clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);

	if (bargs) {
		memset(bargs, 0, sizeof(*bargs));
		btrfs_update_ioctl_balance_args(fs_info, bargs);
	}

	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
	    balance_need_close(fs_info)) {
		reset_balance_state(fs_info);
		clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
	}

	wake_up(&fs_info->balance_wait_q);

	return ret;
out:
	if (bctl->flags & BTRFS_BALANCE_RESUME)
		reset_balance_state(fs_info);
	else
		kfree(bctl);
	clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);

	return ret;
}
/*
 * Kernel thread entry point used to resume a recovered balance.  Runs
 * btrfs_balance() under the balance mutex if a balance control exists.
 */
static int balance_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_balance_control *bctl;
	int ret;

	mutex_lock(&fs_info->balance_mutex);
	bctl = fs_info->balance_ctl;
	ret = bctl ? btrfs_balance(fs_info, bctl, NULL) : 0;
	mutex_unlock(&fs_info->balance_mutex);

	return ret;
}
/*
 * If a paused balance was recovered (fs_info->balance_ctl is set), spawn a
 * kernel thread to resume it, unless the skip_balance mount option is set.
 * Returns 0 when there is nothing to do, or the kthread_run() error code.
 */
int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
{
	struct task_struct *tsk;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return 0;
	}
	mutex_unlock(&fs_info->balance_mutex);

	if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
		btrfs_info(fs_info, "balance: resume skipped");
		return 0;
	}

	/*
	 * A ro->rw remount sequence should continue with the paused balance
	 * regardless of who pauses it, system or the user as of now, so set
	 * the resume flag.
	 */
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
	spin_unlock(&fs_info->balance_lock);

	tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
	return PTR_ERR_OR_ZERO(tsk);
}
/*
 * At mount time, read the balance item (if any) from the tree root and
 * rebuild fs_info->balance_ctl so an interrupted balance can be resumed.
 * Sets BTRFS_BALANCE_RESUME and claims the exclusive-op bit.  Returns 0
 * (also when no balance item exists) or a negative errno.
 */
int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
{
	struct btrfs_balance_control *bctl;
	struct btrfs_balance_item *item;
	struct btrfs_disk_balance_args disk_bargs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_BALANCE_OBJECTID;
	key.type = BTRFS_TEMPORARY_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) { /* ret = -ENOENT; */
		ret = 0;
		goto out;
	}

	bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
	if (!bctl) {
		ret = -ENOMEM;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);

	/* deserialize flags and the three per-type argument sets */
	bctl->flags = btrfs_balance_flags(leaf, item);
	bctl->flags |= BTRFS_BALANCE_RESUME;

	btrfs_balance_data(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
	btrfs_balance_meta(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
	btrfs_balance_sys(leaf, item, &disk_bargs);
	btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);

	/*
	 * This should never happen, as the paused balance state is recovered
	 * during mount without any chance of other exclusive ops to collide.
	 *
	 * This gives the exclusive op status to balance and keeps in paused
	 * state until user intervention (cancel or umount). If the ownership
	 * cannot be assigned, show a message but do not fail. The balance
	 * is in a paused state and must have fs_info::balance_ctl properly
	 * set up.
	 */
	if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags))
		btrfs_warn(fs_info,
	"balance: cannot set exclusive op status, resume manually");

	mutex_lock(&fs_info->balance_mutex);
	BUG_ON(fs_info->balance_ctl);
	spin_lock(&fs_info->balance_lock);
	fs_info->balance_ctl = bctl;
	spin_unlock(&fs_info->balance_lock);
	mutex_unlock(&fs_info->balance_mutex);
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Request that a running balance pause, and wait until __btrfs_balance()
 * has stopped.  Returns 0 on success or -ENOTCONN when no balance is
 * currently running.
 */
int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
{
	int ret = 0;

	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		atomic_inc(&fs_info->balance_pause_req);
		mutex_unlock(&fs_info->balance_mutex);

		/* __btrfs_balance() checks balance_pause_req and bails out */
		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));

		mutex_lock(&fs_info->balance_mutex);
		/* we are good with balance_ctl ripped off from under us */
		BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		atomic_dec(&fs_info->balance_pause_req);
	} else {
		ret = -ENOTCONN;
	}

	mutex_unlock(&fs_info->balance_mutex);
	return ret;
}
/*
 * Cancel a running or paused balance, tearing down its state.
 *
 * Returns 0 on success, -ENOTCONN if no balance is in progress, or -EROFS
 * on a read-only mount (cancelling deletes the on-disk balance item, which
 * is what allows a later read-write mount to resume the balance).
 */
int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
{
	mutex_lock(&fs_info->balance_mutex);
	if (!fs_info->balance_ctl) {
		mutex_unlock(&fs_info->balance_mutex);
		return -ENOTCONN;
	}

	/*
	 * A paused balance with the item stored on disk can be resumed at
	 * mount time if the mount is read-write. Otherwise it's still paused
	 * and we must not allow cancelling as it deletes the item.
	 */
	if (sb_rdonly(fs_info->sb)) {
		mutex_unlock(&fs_info->balance_mutex);
		return -EROFS;
	}

	atomic_inc(&fs_info->balance_cancel_req);
	/*
	 * if we are running just wait and return, balance item is
	 * deleted in btrfs_balance in this case
	 */
	if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
		mutex_unlock(&fs_info->balance_mutex);
		wait_event(fs_info->balance_wait_q,
			   !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
		mutex_lock(&fs_info->balance_mutex);
	} else {
		mutex_unlock(&fs_info->balance_mutex);
		/*
		 * Lock released to allow other waiters to continue, we'll
		 * reexamine the status again.
		 */
		mutex_lock(&fs_info->balance_mutex);

		if (fs_info->balance_ctl) {
			/* The worker never ran again; tear down the paused
			 * balance state ourselves. */
			reset_balance_state(fs_info);
			clear_bit(BTRFS_FS_EXCL_OP, &fs_info->flags);
			btrfs_info(fs_info, "balance: canceled");
		}
	}

	BUG_ON(fs_info->balance_ctl ||
	       test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
	atomic_dec(&fs_info->balance_cancel_req);
	mutex_unlock(&fs_info->balance_mutex);
	return 0;
}
/*
 * Kernel thread that walks all root items and (re)creates the matching
 * UUID-tree entries for every subvolume with a non-empty uuid or
 * received_uuid.
 *
 * Always returns 0; failures are logged via btrfs_warn() and leave
 * BTRFS_FS_UPDATE_UUID_TREE_GEN unset.  Releases uuid_tree_rescan_sem
 * when done.
 */
static int btrfs_uuid_scan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	int ret = 0;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_root_item root_item;
	u32 item_size;
	struct btrfs_trans_handle *trans = NULL;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = 0;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &key, path,
					   BTRFS_OLDEST_GENERATION);
		if (ret) {
			if (ret > 0)
				ret = 0;	/* end of tree reached */
			break;
		}

		/* Only subvolume root items carry uuids of interest. */
		if (key.type != BTRFS_ROOT_ITEM_KEY ||
		    (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
		     key.objectid != BTRFS_FS_TREE_OBJECTID) ||
		    key.objectid > BTRFS_LAST_FREE_OBJECTID)
			goto skip;

		eb = path->nodes[0];
		slot = path->slots[0];
		item_size = btrfs_item_size_nr(eb, slot);
		if (item_size < sizeof(root_item))
			goto skip;

		read_extent_buffer(eb, &root_item,
				   btrfs_item_ptr_offset(eb, slot),
				   (int)sizeof(root_item));
		if (btrfs_root_refs(&root_item) == 0)
			goto skip;

		if (!btrfs_is_empty_uuid(root_item.uuid) ||
		    !btrfs_is_empty_uuid(root_item.received_uuid)) {
			if (trans)
				goto update_tree;

			/* Release the path before joining a transaction so
			 * we don't hold tree locks across it. */
			btrfs_release_path(path);
			/*
			 * 1 - subvol uuid item
			 * 1 - received_subvol uuid item
			 */
			trans = btrfs_start_transaction(fs_info->uuid_root, 2);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
			/* Re-run the search with the transaction held. */
			continue;
		} else {
			goto skip;
		}
update_tree:
		if (!btrfs_is_empty_uuid(root_item.uuid)) {
			ret = btrfs_uuid_tree_add(trans, root_item.uuid,
						  BTRFS_UUID_KEY_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					   ret);
				break;
			}
		}

		if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
			ret = btrfs_uuid_tree_add(trans,
						  root_item.received_uuid,
						 BTRFS_UUID_KEY_RECEIVED_SUBVOL,
						  key.objectid);
			if (ret < 0) {
				btrfs_warn(fs_info, "uuid_tree_add failed %d",
					   ret);
				break;
			}
		}

skip:
		if (trans) {
			ret = btrfs_end_transaction(trans);
			trans = NULL;
			if (ret)
				break;
		}

		btrfs_release_path(path);
		/* Advance the search key just past the item we processed. */
		if (key.offset < (u64)-1) {
			key.offset++;
		} else if (key.type < BTRFS_ROOT_ITEM_KEY) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
		} else if (key.objectid < (u64)-1) {
			key.offset = 0;
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.objectid++;
		} else {
			break;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);
	if (trans && !IS_ERR(trans))
		btrfs_end_transaction(trans);
	if (ret)
		btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
	else
		set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
	up(&fs_info->uuid_tree_rescan_sem);
	return 0;
}
/*
 * Callback for btrfs_uuid_tree_iterate().
 * returns:
 * 0	check succeeded, the entry is not outdated.
 * < 0	if an error occurred.
 * > 0	if the check failed, which means the caller shall remove the entry.
 */
static int btrfs_check_uuid_tree_entry(struct btrfs_fs_info *fs_info,
				       u8 *uuid, u8 type, u64 subid)
{
	struct btrfs_root *subvol_root;
	struct btrfs_key search_key;
	u8 *expected;
	int err;

	/* Only subvolume uuid entry types are validated here. */
	if (type != BTRFS_UUID_KEY_SUBVOL &&
	    type != BTRFS_UUID_KEY_RECEIVED_SUBVOL)
		return 0;

	search_key.objectid = subid;
	search_key.type = BTRFS_ROOT_ITEM_KEY;
	search_key.offset = (u64)-1;

	subvol_root = btrfs_read_fs_root_no_name(fs_info, &search_key);
	if (IS_ERR(subvol_root)) {
		err = PTR_ERR(subvol_root);
		/* A missing subvolume means the entry is stale. */
		return err == -ENOENT ? 1 : err;
	}

	if (type == BTRFS_UUID_KEY_SUBVOL)
		expected = subvol_root->root_item.uuid;
	else
		expected = subvol_root->root_item.received_uuid;

	return memcmp(uuid, expected, BTRFS_UUID_SIZE) ? 1 : 0;
}
/*
 * Kernel thread for a full UUID-tree rescan:
 *   pass 1 drops every existing entry whose backing subvolume no longer
 *   matches; pass 2 (delegated to the scan kthread) re-adds anything
 *   that is missing.
 */
static int btrfs_uuid_rescan_kthread(void *data)
{
	struct btrfs_fs_info *fs_info = data;
	int err;

	err = btrfs_uuid_tree_iterate(fs_info, btrfs_check_uuid_tree_entry);
	if (err < 0) {
		btrfs_warn(fs_info, "iterating uuid_tree failed %d", err);
		up(&fs_info->uuid_tree_rescan_sem);
		return err;
	}

	return btrfs_uuid_scan_kthread(data);
}
/*
 * Create the UUID tree and start a background thread that populates it
 * from existing subvolume root items.
 *
 * NOTE(review): fs_info->uuid_root is published before the creating
 * transaction commits; on commit failure the pointer is left set —
 * confirm callers handle that.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *uuid_root;
	struct task_struct *task;
	int ret;

	/*
	 * 1 - root node
	 * 1 - root item
	 */
	trans = btrfs_start_transaction(tree_root, 2);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	uuid_root = btrfs_create_tree(trans, fs_info,
				      BTRFS_UUID_TREE_OBJECTID);
	if (IS_ERR(uuid_root)) {
		ret = PTR_ERR(uuid_root);
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	fs_info->uuid_root = uuid_root;

	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	/* Released by the scan thread when it finishes. */
	down(&fs_info->uuid_tree_rescan_sem);
	task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
	if (IS_ERR(task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_scan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(task);
	}

	return 0;
}
/*
 * Kick off a background UUID-tree consistency rescan.  The rescan thread
 * releases uuid_tree_rescan_sem when it completes.
 */
int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info)
{
	struct task_struct *rescan_task;

	down(&fs_info->uuid_tree_rescan_sem);
	rescan_task = kthread_run(btrfs_uuid_rescan_kthread, fs_info,
				  "btrfs-uuid");
	if (IS_ERR(rescan_task)) {
		/* fs_info->update_uuid_tree_gen remains 0 in all error case */
		btrfs_warn(fs_info, "failed to start uuid_rescan task");
		up(&fs_info->uuid_tree_rescan_sem);
		return PTR_ERR(rescan_task);
	}

	return 0;
}
/*
 * shrinking a device means finding all of the device extents past
 * the new size, and then following the back refs to the chunks.
 * The chunk relocation code actually frees the device extent
 */
int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_dev_extent *dev_extent = NULL;
	struct btrfs_path *path;
	u64 length;
	u64 chunk_offset;
	int ret;
	int slot;
	int failed = 0;
	bool retried = false;
	bool checked_pending_chunks = false;
	struct extent_buffer *l;
	struct btrfs_key key;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	u64 old_total = btrfs_super_total_bytes(super_copy);
	u64 old_size = btrfs_device_get_total_bytes(device);
	u64 diff;

	/* Device sizes are tracked in sectorsize granularity. */
	new_size = round_down(new_size, fs_info->sectorsize);
	diff = round_down(old_size - new_size, fs_info->sectorsize);

	/* A dev-replace target device cannot be resized. */
	if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
		return -EINVAL;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_BACK;

	/* Publish the smaller size first so no new allocations land in
	 * the region being removed. */
	mutex_lock(&fs_info->chunk_mutex);

	btrfs_device_set_total_bytes(device, new_size);
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
		device->fs_devices->total_rw_bytes -= diff;
		atomic64_sub(diff, &fs_info->free_chunk_space);
	}
	mutex_unlock(&fs_info->chunk_mutex);

again:
	/* Walk the device extents from the end of the device backwards,
	 * relocating every chunk that overlaps the removed region. */
	key.objectid = device->devid;
	key.offset = (u64)-1;
	key.type = BTRFS_DEV_EXTENT_KEY;

	do {
		mutex_lock(&fs_info->delete_unused_bgs_mutex);
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_previous_item(root, path, 0, key.type);
		if (ret)
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret < 0)
			goto done;
		if (ret) {
			/* No more device extents for this device. */
			ret = 0;
			btrfs_release_path(path);
			break;
		}

		l = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(l, &key, path->slots[0]);

		if (key.objectid != device->devid) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
		length = btrfs_dev_extent_length(l, dev_extent);

		/* Extents entirely below new_size can stay. */
		if (key.offset + length <= new_size) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			btrfs_release_path(path);
			break;
		}

		chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
		btrfs_release_path(path);

		/*
		 * We may be relocating the only data chunk we have,
		 * which could potentially end up with losing data's
		 * raid profile, so lets allocate an empty one in
		 * advance.
		 */
		ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
		if (ret < 0) {
			mutex_unlock(&fs_info->delete_unused_bgs_mutex);
			goto done;
		}

		ret = btrfs_relocate_chunk(fs_info, chunk_offset);
		mutex_unlock(&fs_info->delete_unused_bgs_mutex);
		if (ret == -ENOSPC) {
			/* Remember the failure and retry once more below. */
			failed++;
		} else if (ret) {
			if (ret == -ETXTBSY) {
				btrfs_warn(fs_info,
		   "could not shrink block group %llu due to active swapfile",
					   chunk_offset);
			}
			goto done;
		}
	} while (key.offset-- > 0);

	if (failed && !retried) {
		failed = 0;
		retried = true;
		goto again;
	} else if (failed && retried) {
		ret = -ENOSPC;
		goto done;
	}

	/* Shrinking succeeded, else we would be at "done". */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto done;
	}

	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * We checked in the above loop all device extents that were already in
	 * the device tree. However before we have updated the device's
	 * total_bytes to the new size, we might have had chunk allocations that
	 * have not complete yet (new block groups attached to transaction
	 * handles), and therefore their device extents were not yet in the
	 * device tree and we missed them in the loop above. So if we have any
	 * pending chunk using a device extent that overlaps the device range
	 * that we can not use anymore, commit the current transaction and
	 * repeat the search on the device tree - this way we guarantee we will
	 * not have chunks using device extents that end beyond 'new_size'.
	 */
	if (!checked_pending_chunks) {
		u64 start = new_size;
		u64 len = old_size - new_size;

		if (contains_pending_extent(trans->transaction, device,
					    &start, len)) {
			mutex_unlock(&fs_info->chunk_mutex);
			checked_pending_chunks = true;
			failed = 0;
			retried = false;
			ret = btrfs_commit_transaction(trans);
			if (ret)
				goto done;
			goto again;
		}
	}

	btrfs_device_set_disk_total_bytes(device, new_size);
	if (list_empty(&device->resized_list))
		list_add_tail(&device->resized_list,
			      &fs_info->fs_devices->resized_devices);

	WARN_ON(diff > old_total);
	btrfs_set_super_total_bytes(super_copy,
			round_down(old_total - diff, fs_info->sectorsize));
	mutex_unlock(&fs_info->chunk_mutex);

	/* Now btrfs_update_device() will change the on-disk size. */
	ret = btrfs_update_device(trans, device);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	} else {
		ret = btrfs_commit_transaction(trans);
	}
done:
	btrfs_free_path(path);
	if (ret) {
		/* Roll back the in-memory size accounting done up front. */
		mutex_lock(&fs_info->chunk_mutex);
		btrfs_device_set_total_bytes(device, old_size);
		if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
			device->fs_devices->total_rw_bytes += diff;
		/* NOTE(review): free_chunk_space was only decremented when
		 * the device was writeable, but is re-added unconditionally
		 * here — confirm this asymmetry is intended. */
		atomic64_add(diff, &fs_info->free_chunk_space);
		mutex_unlock(&fs_info->chunk_mutex);
	}
	return ret;
}
/*
 * Append a chunk item, preceded by its disk key, to the superblock's
 * in-memory sys_chunk_array.  Returns -EFBIG when the array has no room.
 */
static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
				  struct btrfs_key *key,
				  struct btrfs_chunk *chunk, int item_size)
{
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct btrfs_disk_key disk_key;
	u32 cur_size;
	u8 *dest;

	mutex_lock(&fs_info->chunk_mutex);
	cur_size = btrfs_super_sys_array_size(super_copy);
	if (cur_size + item_size + sizeof(disk_key)
			> BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
		mutex_unlock(&fs_info->chunk_mutex);
		return -EFBIG;
	}

	/* The disk key precedes the chunk payload in the array. */
	btrfs_cpu_key_to_disk(&disk_key, key);
	dest = super_copy->sys_chunk_array + cur_size;
	memcpy(dest, &disk_key, sizeof(disk_key));
	memcpy(dest + sizeof(disk_key), chunk, item_size);

	btrfs_set_super_sys_array_size(super_copy,
			cur_size + item_size + sizeof(disk_key));
	mutex_unlock(&fs_info->chunk_mutex);

	return 0;
}
/*
 * sort the devices in descending order by max_avail, total_avail
 */
static int btrfs_cmp_device_info(const void *a, const void *b)
{
	const struct btrfs_device_info *dev_a = a;
	const struct btrfs_device_info *dev_b = b;

	/* Larger max_avail sorts first (descending). */
	if (dev_a->max_avail != dev_b->max_avail)
		return dev_a->max_avail > dev_b->max_avail ? -1 : 1;
	/* Tie-break on total_avail, also descending. */
	if (dev_a->total_avail != dev_b->total_avail)
		return dev_a->total_avail > dev_b->total_avail ? -1 : 1;
	return 0;
}
/* Mark the filesystem RAID56-incompatible when a raid5/6 chunk appears. */
static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
{
	if (type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		btrfs_set_fs_incompat(info, RAID56);
}
/*
 * Maximum number of stripes (thus devices) that fit in one chunk item of
 * a tree leaf: one btrfs_chunk header plus one btrfs_stripe per device.
 */
#define BTRFS_MAX_DEVS(info) ((BTRFS_MAX_ITEM_SIZE(info)	\
			- sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)

/*
 * Same limit for system chunks, which must also fit into the superblock's
 * sys_chunk_array (reserving space for two key+chunk pairs).
 */
#define BTRFS_MAX_DEVS_SYS_CHUNK ((BTRFS_SYSTEM_CHUNK_ARRAY_SIZE	\
			- 2 * sizeof(struct btrfs_disk_key)		\
			- 2 * sizeof(struct btrfs_chunk))		\
			/ sizeof(struct btrfs_stripe) + 1)
/*
 * Create a new chunk of @type at logical offset @start.
 *
 * Gathers per-device free space, sorts devices by available room, computes
 * the stripe layout for the requested raid profile, inserts the mapping
 * into the extent map tree and creates the block group.  The chunk item
 * itself is written later by btrfs_finish_chunk_alloc().
 *
 * Returns 0 on success, -ENOSPC when there are not enough devices or
 * space, or another negative errno.
 */
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
			       u64 start, u64 type)
{
	struct btrfs_fs_info *info = trans->fs_info;
	struct btrfs_fs_devices *fs_devices = info->fs_devices;
	struct btrfs_device *device;
	struct map_lookup *map = NULL;
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_device_info *devices_info = NULL;
	u64 total_avail;
	int num_stripes;	/* total number of stripes to allocate */
	int data_stripes;	/* number of stripes that count for
				   block group size */
	int sub_stripes;	/* sub_stripes info for map */
	int dev_stripes;	/* stripes per dev */
	int devs_max;		/* max devs to use */
	int devs_min;		/* min devs needed */
	int devs_increment;	/* ndevs has to be a multiple of this */
	int ncopies;		/* how many copies to data has */
	int nparity;		/* number of stripes worth of bytes to
				   store parity information */
	int ret;
	u64 max_stripe_size;
	u64 max_chunk_size;
	u64 stripe_size;
	u64 chunk_size;
	int ndevs;
	int i;
	int j;
	int index;

	BUG_ON(!alloc_profile_is_valid(type, 0));

	if (list_empty(&fs_devices->alloc_list)) {
		if (btrfs_test_opt(info, ENOSPC_DEBUG))
			btrfs_debug(info, "%s: no writable device", __func__);
		return -ENOSPC;
	}

	/* Pull the layout parameters for the requested raid profile. */
	index = btrfs_bg_flags_to_raid_index(type);

	sub_stripes = btrfs_raid_array[index].sub_stripes;
	dev_stripes = btrfs_raid_array[index].dev_stripes;
	devs_max = btrfs_raid_array[index].devs_max;
	devs_min = btrfs_raid_array[index].devs_min;
	devs_increment = btrfs_raid_array[index].devs_increment;
	ncopies = btrfs_raid_array[index].ncopies;
	nparity = btrfs_raid_array[index].nparity;

	if (type & BTRFS_BLOCK_GROUP_DATA) {
		max_stripe_size = SZ_1G;
		max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info);
	} else if (type & BTRFS_BLOCK_GROUP_METADATA) {
		/* for larger filesystems, use larger metadata chunks */
		if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
			max_stripe_size = SZ_1G;
		else
			max_stripe_size = SZ_256M;
		max_chunk_size = max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS(info);
	} else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
		max_stripe_size = SZ_32M;
		max_chunk_size = 2 * max_stripe_size;
		if (!devs_max)
			devs_max = BTRFS_MAX_DEVS_SYS_CHUNK;
	} else {
		btrfs_err(info, "invalid chunk type 0x%llx requested",
			  type);
		BUG_ON(1);
	}

	/* We don't want a chunk larger than 10% of writable space */
	max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
			     max_chunk_size);

	devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
			       GFP_NOFS);
	if (!devices_info)
		return -ENOMEM;

	/*
	 * in the first pass through the devices list, we gather information
	 * about the available holes on each device.
	 */
	ndevs = 0;
	list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
		u64 max_avail;
		u64 dev_offset;

		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
			WARN(1, KERN_ERR
			       "BTRFS: read-only device in alloc_list\n");
			continue;
		}

		if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
					&device->dev_state) ||
		    test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
			continue;

		if (device->total_bytes > device->bytes_used)
			total_avail = device->total_bytes - device->bytes_used;
		else
			total_avail = 0;

		/* If there is no space on this device, skip it. */
		if (total_avail == 0)
			continue;

		ret = find_free_dev_extent(trans, device,
					   max_stripe_size * dev_stripes,
					   &dev_offset, &max_avail);
		if (ret && ret != -ENOSPC)
			goto error;

		if (ret == 0)
			max_avail = max_stripe_size * dev_stripes;

		if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) {
			if (btrfs_test_opt(info, ENOSPC_DEBUG))
				btrfs_debug(info,
			"%s: devid %llu has no free space, have=%llu want=%u",
					    __func__, device->devid, max_avail,
					    BTRFS_STRIPE_LEN * dev_stripes);
			continue;
		}

		if (ndevs == fs_devices->rw_devices) {
			WARN(1, "%s: found more than %llu devices\n",
			     __func__, fs_devices->rw_devices);
			break;
		}
		devices_info[ndevs].dev_offset = dev_offset;
		devices_info[ndevs].max_avail = max_avail;
		devices_info[ndevs].total_avail = total_avail;
		devices_info[ndevs].dev = device;
		++ndevs;
	}

	/*
	 * now sort the devices by hole size / available space
	 */
	sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
	     btrfs_cmp_device_info, NULL);

	/* round down to number of usable stripes */
	ndevs = round_down(ndevs, devs_increment);

	if (ndevs < devs_min) {
		ret = -ENOSPC;
		if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
			btrfs_debug(info,
	"%s: not enough devices with free space: have=%d minimum required=%d",
				    __func__, ndevs, devs_min);
		}
		goto error;
	}

	ndevs = min(ndevs, devs_max);

	/*
	 * The primary goal is to maximize the number of stripes, so use as
	 * many devices as possible, even if the stripes are not maximum sized.
	 *
	 * The DUP profile stores more than one stripe per device, the
	 * max_avail is the total size so we have to adjust.
	 */
	stripe_size = div_u64(devices_info[ndevs - 1].max_avail, dev_stripes);
	num_stripes = ndevs * dev_stripes;

	/*
	 * this will have to be fixed for RAID1 and RAID10 over
	 * more drives
	 */
	data_stripes = (num_stripes - nparity) / ncopies;

	/*
	 * Use the number of data stripes to figure out how big this chunk
	 * is really going to be in terms of logical address space,
	 * and compare that answer with the max chunk size. If it's higher,
	 * we try to reduce stripe_size.
	 */
	if (stripe_size * data_stripes > max_chunk_size) {
		/*
		 * Reduce stripe_size, round it up to a 16MB boundary again and
		 * then use it, unless it ends up being even bigger than the
		 * previous value we had already.
		 */
		stripe_size = min(round_up(div_u64(max_chunk_size,
						   data_stripes), SZ_16M),
				  stripe_size);
	}

	/* align to BTRFS_STRIPE_LEN */
	stripe_size = round_down(stripe_size, BTRFS_STRIPE_LEN);

	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		ret = -ENOMEM;
		goto error;
	}
	map->num_stripes = num_stripes;

	/* Assign a physical device offset to every stripe. */
	for (i = 0; i < ndevs; ++i) {
		for (j = 0; j < dev_stripes; ++j) {
			int s = i * dev_stripes + j;
			map->stripes[s].dev = devices_info[i].dev;
			map->stripes[s].physical = devices_info[i].dev_offset +
						   j * stripe_size;
		}
	}
	map->stripe_len = BTRFS_STRIPE_LEN;
	map->io_align = BTRFS_STRIPE_LEN;
	map->io_width = BTRFS_STRIPE_LEN;
	map->type = type;
	map->sub_stripes = sub_stripes;

	chunk_size = stripe_size * data_stripes;

	trace_btrfs_chunk_alloc(info, map, start, chunk_size);

	em = alloc_extent_map();
	if (!em) {
		kfree(map);
		ret = -ENOMEM;
		goto error;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = start;
	em->len = chunk_size;
	em->block_start = 0;
	em->block_len = em->len;
	em->orig_block_len = stripe_size;

	em_tree = &info->mapping_tree.map_tree;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	if (ret) {
		write_unlock(&em_tree->lock);
		free_extent_map(em);
		goto error;
	}

	/* Extra reference for the pending_chunks list. */
	list_add_tail(&em->list, &trans->transaction->pending_chunks);
	refcount_inc(&em->refs);
	write_unlock(&em_tree->lock);

	ret = btrfs_make_block_group(trans, 0, type, start, chunk_size);
	if (ret)
		goto error_del_extent;

	for (i = 0; i < map->num_stripes; i++)
		btrfs_device_set_bytes_used(map->stripes[i].dev,
				map->stripes[i].dev->bytes_used + stripe_size);

	atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space);

	free_extent_map(em);
	check_raid56_incompat_flag(info, type);

	kfree(devices_info);
	return 0;

error_del_extent:
	write_lock(&em_tree->lock);
	remove_extent_mapping(em_tree, em);
	write_unlock(&em_tree->lock);

	/* One for our allocation */
	free_extent_map(em);
	/* One for the tree reference */
	free_extent_map(em);
	/* One for the pending_chunks list reference */
	free_extent_map(em);
error:
	kfree(devices_info);
	return ret;
}
/*
 * Second phase of chunk allocation: write the chunk item, the per-device
 * extents and updated device items for a chunk created earlier by
 * __btrfs_alloc_chunk().  System chunks are additionally appended to the
 * superblock's sys_chunk_array.
 */
int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
			     u64 chunk_offset, u64 chunk_size)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_root *chunk_root = fs_info->chunk_root;
	struct btrfs_key key;
	struct btrfs_device *device;
	struct btrfs_chunk *chunk;
	struct btrfs_stripe *stripe;
	struct extent_map *em;
	struct map_lookup *map;
	size_t item_size;
	u64 dev_offset;
	u64 stripe_size;
	int i = 0;
	int ret = 0;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
	if (IS_ERR(em))
		return PTR_ERR(em);

	map = em->map_lookup;
	item_size = btrfs_chunk_item_size(map->num_stripes);
	stripe_size = em->orig_block_len;

	chunk = kzalloc(item_size, GFP_NOFS);
	if (!chunk) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Take the device list mutex to prevent races with the final phase of
	 * a device replace operation that replaces the device object associated
	 * with the map's stripes, because the device object's id can change
	 * at any time during that final phase of the device replace operation
	 * (dev-replace.c:btrfs_dev_replace_finishing()).
	 */
	mutex_lock(&fs_info->fs_devices->device_list_mutex);
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		ret = btrfs_update_device(trans, device);
		if (ret)
			break;
		ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
					     dev_offset, stripe_size);
		if (ret)
			break;
	}
	if (ret) {
		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
		goto out;
	}

	/* Fill in the stripe array of the in-memory chunk item. */
	stripe = &chunk->stripe;
	for (i = 0; i < map->num_stripes; i++) {
		device = map->stripes[i].dev;
		dev_offset = map->stripes[i].physical;

		btrfs_set_stack_stripe_devid(stripe, device->devid);
		btrfs_set_stack_stripe_offset(stripe, dev_offset);
		memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
		stripe++;
	}
	mutex_unlock(&fs_info->fs_devices->device_list_mutex);

	btrfs_set_stack_chunk_length(chunk, chunk_size);
	btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
	btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
	btrfs_set_stack_chunk_type(chunk, map->type);
	btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
	btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
	btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
	btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
	btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);

	key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
	key.type = BTRFS_CHUNK_ITEM_KEY;
	key.offset = chunk_offset;

	ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
	if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
		/*
		 * TODO: Cleanup of inserted chunk root in case of
		 * failure.
		 */
		ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
	}

out:
	kfree(chunk);
	free_extent_map(em);
	return ret;
}
/*
 * Chunk allocation falls into two parts. The first part does work
 * that makes the new allocated chunk usable, but does not do any operation
 * that modifies the chunk tree. The second part does the work that
 * requires modifying the chunk tree. This division is important for the
 * bootstrap process of adding storage to a seed btrfs.
 */
int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
{
	u64 chunk_offset;

	/* Serialized by the caller holding chunk_mutex. */
	lockdep_assert_held(&trans->fs_info->chunk_mutex);
	chunk_offset = find_next_chunk(trans->fs_info);
	return __btrfs_alloc_chunk(trans, chunk_offset, type);
}
/*
 * Bootstrap the first writable device: allocate one metadata chunk and
 * then one system chunk, at consecutive free logical offsets.
 */
static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
					 struct btrfs_fs_info *fs_info)
{
	u64 offset;
	u64 profile;
	int err;

	offset = find_next_chunk(fs_info);
	profile = btrfs_metadata_alloc_profile(fs_info);
	err = __btrfs_alloc_chunk(trans, offset, profile);
	if (err)
		return err;

	offset = find_next_chunk(fs_info);
	profile = btrfs_system_alloc_profile(fs_info);
	return __btrfs_alloc_chunk(trans, offset, profile);
}
/*
 * Number of device failures a chunk with the given raid profile can
 * tolerate while staying writable.
 */
static inline int btrfs_chunk_max_errors(struct map_lookup *map)
{
	if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
			 BTRFS_BLOCK_GROUP_RAID10 |
			 BTRFS_BLOCK_GROUP_RAID5 |
			 BTRFS_BLOCK_GROUP_DUP))
		return 1;
	if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		return 2;
	/* single / raid0: no redundancy */
	return 0;
}
/*
 * Decide whether the chunk at @chunk_offset must be treated as read-only:
 * a stripe lives on a non-writeable device, or more devices are missing
 * than the chunk's raid profile tolerates.
 *
 * Returns 1 for read-only (including lookup failure), 0 otherwise.
 */
int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
{
	struct extent_map *em;
	struct map_lookup *map;
	int readonly = 0;
	int miss_ndevs = 0;
	int i;

	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
	if (IS_ERR(em))
		return 1;

	map = em->map_lookup;
	for (i = 0; i < map->num_stripes; i++) {
		if (test_bit(BTRFS_DEV_STATE_MISSING,
					&map->stripes[i].dev->dev_state)) {
			miss_ndevs++;
			continue;
		}
		if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
					&map->stripes[i].dev->dev_state)) {
			readonly = 1;
			goto end;
		}
	}

	/*
	 * If the number of missing devices is larger than max errors,
	 * we can not write the data into that chunk successfully, so
	 * set it readonly.
	 */
	if (miss_ndevs > btrfs_chunk_max_errors(map))
		readonly = 1;
end:
	free_extent_map(em);
	return readonly;
}
/* Initialize the logical->physical chunk mapping tree. */
void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
{
	extent_map_tree_init(&tree->map_tree);
}
/*
 * Drop every extent map from the chunk mapping tree, releasing both the
 * reference taken by the lookup here and the tree's own reference.
 */
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
{
	struct extent_map *em;

	while (1) {
		write_lock(&tree->map_tree.lock);
		em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
		if (em)
			remove_extent_mapping(&tree->map_tree, em);
		write_unlock(&tree->map_tree.lock);
		if (!em)
			break;
		/* once for us */
		free_extent_map(em);
		/* once for the tree */
		free_extent_map(em);
	}
}
/*
 * Return how many copies of the data at @logical exist, i.e. how many
 * mirrors a read can fall back to.  An ongoing device replace adds one
 * for the target device.
 */
int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	struct map_lookup *map;
	int ret;

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (IS_ERR(em))
		/*
		 * We could return errors for these cases, but that could get
		 * ugly and we'd probably do the same thing which is just not do
		 * anything else and exit, so return 1 so the callers don't try
		 * to use other copies.
		 */
		return 1;

	map = em->map_lookup;
	if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
		ret = map->num_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		ret = map->sub_stripes;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
		ret = 2;
	else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
		/*
		 * There could be two corrupted data stripes, we need
		 * to loop retry in order to rebuild the correct data.
		 *
		 * Fail a stripe at a time on every retry except the
		 * stripe under reconstruction.
		 */
		ret = map->num_stripes;
	else
		ret = 1;
	free_extent_map(em);

	down_read(&fs_info->dev_replace.rwsem);
	if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
	    fs_info->dev_replace.tgtdev)
		ret++;
	up_read(&fs_info->dev_replace.rwsem);

	return ret;
}
/*
 * Length of a full stripe at @logical: for raid5/6 chunks this is
 * stripe_len times the number of data stripes, otherwise one sector.
 */
unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
				    u64 logical)
{
	struct extent_map *em;
	struct map_lookup *map;
	unsigned long full_len = fs_info->sectorsize;

	em = btrfs_get_chunk_map(fs_info, logical, full_len);
	if (WARN_ON(IS_ERR(em)))
		return full_len;

	map = em->map_lookup;
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		full_len = map->stripe_len * nr_data_stripes(map);
	free_extent_map(em);

	return full_len;
}
/* Return 1 when the chunk covering @logical is a raid5/6 (parity) chunk. */
int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
{
	struct extent_map *em;
	int parity = 0;

	em = btrfs_get_chunk_map(fs_info, logical, len);
	if (WARN_ON(IS_ERR(em)))
		return 0;

	if (em->map_lookup->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
		parity = 1;
	free_extent_map(em);

	return parity;
}
/*
 * Pick a live mirror stripe to read from, within stripes
 * [first, first + num_stripes) of a RAID1/RAID10 map.
 *
 * Prefers a pid-based stripe (spreads readers across mirrors) and avoids
 * the dev-replace source device unless no other stripe has a live bdev.
 */
static int find_live_mirror(struct btrfs_fs_info *fs_info,
			    struct map_lookup *map, int first,
			    int dev_replace_is_ongoing)
{
	int i;
	int num_stripes;
	int preferred_mirror;
	int tolerance;
	struct btrfs_device *srcdev;

	ASSERT((map->type &
		 (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)));

	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		num_stripes = map->sub_stripes;
	else
		num_stripes = map->num_stripes;

	/* Spread read load across mirrors by hashing on the pid. */
	preferred_mirror = first + current->pid % num_stripes;

	if (dev_replace_is_ongoing &&
	    fs_info->dev_replace.cont_reading_from_srcdev_mode ==
	     BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
		srcdev = fs_info->dev_replace.srcdev;
	else
		srcdev = NULL;

	/*
	 * try to avoid the drive that is the source drive for a
	 * dev-replace procedure, only choose it if no other non-missing
	 * mirror is available
	 */
	for (tolerance = 0; tolerance < 2; tolerance++) {
		if (map->stripes[preferred_mirror].dev->bdev &&
		    (tolerance || map->stripes[preferred_mirror].dev != srcdev))
			return preferred_mirror;
		for (i = first; i < first + num_stripes; i++) {
			if (map->stripes[i].dev->bdev &&
			    (tolerance || map->stripes[i].dev != srcdev))
				return i;
		}
	}

	/* we couldn't find one that doesn't fail. Just return something
	 * and the io error handling code will clean up eventually
	 */
	return preferred_mirror;
}
/* Comparison helper: nonzero when @a should sort after @b (parity last). */
static inline int parity_smaller(u64 a, u64 b)
{
	if (a > b)
		return 1;
	return 0;
}
/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
{
	struct btrfs_bio_stripe s;
	int i;
	u64 l;
	int again = 1;

	/* Repeat passes until no adjacent pair is out of order. */
	while (again) {
		again = 0;
		for (i = 0; i < num_stripes - 1; i++) {
			/* Swap stripes[] and raid_map[] entries together so
			 * the two arrays stay paired by index. */
			if (parity_smaller(bbio->raid_map[i],
					   bbio->raid_map[i+1])) {
				s = bbio->stripes[i];
				l = bbio->raid_map[i];
				bbio->stripes[i] = bbio->stripes[i+1];
				bbio->raid_map[i] = bbio->raid_map[i+1];
				bbio->stripes[i+1] = s;
				bbio->raid_map[i+1] = l;
				again = 1;
			}
		}
	}
}
static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
struct btrfs_bio *bbio = kzalloc(
/* the size of the btrfs_bio */
sizeof(struct btrfs_bio) +
/* plus the variable array for the stripes */
sizeof(struct btrfs_bio_stripe) * (total_stripes) +
/* plus the variable array for the tgt dev */
sizeof(int) * (real_stripes) +
/*
* plus the raid_map, which includes both the tgt dev
* and the stripes
*/
sizeof(u64) * (total_stripes),
GFP_NOFS|__GFP_NOFAIL);
atomic_set(&bbio->error, 0);
refcount_set(&bbio->refs, 1);
return bbio;
}
/* Take an extra reference on @bbio; pairs with btrfs_put_bbio(). */
void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!refcount_read(&bbio->refs));
	refcount_inc(&bbio->refs);
}
/* Drop a reference on @bbio (NULL-safe), freeing it on the last put. */
void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (bbio && refcount_dec_and_test(&bbio->refs))
		kfree(bbio);
}
/* Open question: can REQ_OP_DISCARD be combined with other request ops
 * such as REQ_OP_WRITE? */
/*
* Please note that, discard won't be sent to target device of device
* replace.
*/
/*
 * Map a discard request at @logical/@length to the set of physical
 * stripes it covers and return them in a freshly allocated bbio.
 *
 * Unlike the generic mapping path, a single discard may span many
 * stripes, so a per-stripe length is computed here.  RAID5/6 chunks are
 * rejected with -EOPNOTSUPP.  On success *bbio_ret holds the result and
 * must be released with btrfs_put_bbio().
 */
static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 struct btrfs_bio **bbio_ret)
{
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_bio *bbio;
	u64 offset;
	u64 stripe_nr;
	u64 stripe_nr_end;
	u64 stripe_end_offset;
	u64 stripe_cnt;
	u64 stripe_len;
	u64 stripe_offset;
	u64 num_stripes;
	u32 stripe_index;
	u32 factor = 0;
	u32 sub_stripes = 0;
	u64 stripes_per_dev = 0;
	u32 remaining_stripes = 0;
	u32 last_stripe = 0;
	int ret = 0;
	int i;
	/* discard always return a bbio */
	ASSERT(bbio_ret);
	em = btrfs_get_chunk_map(fs_info, logical, length);
	if (IS_ERR(em))
		return PTR_ERR(em);
	map = em->map_lookup;
	/* we don't discard raid56 yet */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		ret = -EOPNOTSUPP;
		goto out;
	}
	offset = logical - em->start;
	/* clamp the discard to the end of the chunk */
	length = min_t(u64, em->len - offset, length);
	stripe_len = map->stripe_len;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(offset, stripe_len);
	/* stripe_offset is the offset of this block in its stripe */
	stripe_offset = offset - stripe_nr * stripe_len;
	/* stripe_nr_end is the first stripe past the end of the discard */
	stripe_nr_end = round_up(offset + length, map->stripe_len);
	stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
	stripe_cnt = stripe_nr_end - stripe_nr;
	/* how far the discard falls short of the last stripe's end */
	stripe_end_offset = stripe_nr_end * map->stripe_len -
			    (offset + length);
	/*
	 * after this, stripe_nr is the number of stripes on this
	 * device we have to walk to find the data, and stripe_index is
	 * the number of our device in the stripe array
	 */
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
			 BTRFS_BLOCK_GROUP_RAID10)) {
		/* RAID0 behaves like RAID10 with a single sub-stripe */
		if (map->type & BTRFS_BLOCK_GROUP_RAID0)
			sub_stripes = 1;
		else
			sub_stripes = map->sub_stripes;
		factor = map->num_stripes / sub_stripes;
		num_stripes = min_t(u64, map->num_stripes,
				    sub_stripes * stripe_cnt);
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= sub_stripes;
		stripes_per_dev = div_u64_rem(stripe_cnt, factor,
					      &remaining_stripes);
		div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
		last_stripe *= sub_stripes;
	} else if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
				BTRFS_BLOCK_GROUP_DUP)) {
		/* mirrored profiles: discard every copy */
		num_stripes = map->num_stripes;
	} else {
		/* SINGLE: just locate the one device holding the range */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
					&stripe_index);
	}
	bbio = alloc_btrfs_bio(num_stripes, 0);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset + stripe_nr * map->stripe_len;
		bbio->stripes[i].dev = map->stripes[stripe_index].dev;
		if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
				 BTRFS_BLOCK_GROUP_RAID10)) {
			bbio->stripes[i].length = stripes_per_dev *
				map->stripe_len;
			/* devices covered by the remainder get one more stripe */
			if (i / sub_stripes < remaining_stripes)
				bbio->stripes[i].length +=
					map->stripe_len;
			/*
			 * Special for the first stripe and
			 * the last stripe:
			 *
			 * |-------|...|-------|
			 *     |----------|
			 *    off     end_off
			 */
			if (i < sub_stripes)
				bbio->stripes[i].length -=
					stripe_offset;
			if (stripe_index >= last_stripe &&
			    stripe_index <= (last_stripe +
					     sub_stripes - 1))
				bbio->stripes[i].length -=
					stripe_end_offset;
			/* stripe_offset only applies to the first set */
			if (i == sub_stripes - 1)
				stripe_offset = 0;
		} else {
			bbio->stripes[i].length = length;
		}
		stripe_index++;
		if (stripe_index == map->num_stripes) {
			/* wrapped around the device set; next stripe row */
			stripe_index = 0;
			stripe_nr++;
		}
	}
	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
out:
	free_extent_map(em);
	return ret;
}
/*
 * In dev-replace case, for repair case (that's the only case where the mirror
 * is selected explicitly when calling btrfs_map_block), blocks left of the
 * left cursor can also be read from the target drive.
 *
 * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
 * array of stripes.
 * For READ, it also needs to be supported using the same mirror number.
 *
 * If the requested block is not left of the left cursor, EIO is returned. This
 * can happen because btrfs_num_copies() returns one more in the dev-replace
 * case.
 */
static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
					 u64 logical, u64 length,
					 u64 srcdev_devid, int *mirror_num,
					 u64 *physical)
{
	struct btrfs_bio *bbio = NULL;
	int num_stripes;
	int index_srcdev = 0;
	int found = 0;
	u64 physical_of_found = 0;
	int i;
	int ret = 0;
	/* Map with GET_READ_MIRRORS so the target stripe is included */
	ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
				logical, &length, &bbio, 0, 0);
	if (ret) {
		ASSERT(bbio == NULL);
		return ret;
	}
	num_stripes = bbio->num_stripes;
	if (*mirror_num > num_stripes) {
		/*
		 * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
		 * that means that the requested area is not left of the left
		 * cursor
		 */
		btrfs_put_bbio(bbio);
		return -EIO;
	}
	/*
	 * process the rest of the function using the mirror_num of the source
	 * drive. Therefore look it up first. At the end, patch the device
	 * pointer to the one of the target drive.
	 */
	for (i = 0; i < num_stripes; i++) {
		if (bbio->stripes[i].dev->devid != srcdev_devid)
			continue;
		/*
		 * In case of DUP, in order to keep it simple, only add the
		 * mirror with the lowest physical address
		 */
		if (found &&
		    physical_of_found <= bbio->stripes[i].physical)
			continue;
		index_srcdev = i;
		found = 1;
		physical_of_found = bbio->stripes[i].physical;
	}
	btrfs_put_bbio(bbio);
	/* the source device must be part of its own chunk mapping */
	ASSERT(found);
	if (!found)
		return -EIO;
	/* mirror numbers are 1-based stripe indexes */
	*mirror_num = index_srcdev + 1;
	*physical = physical_of_found;
	return ret;
}
/*
 * Adjust an already-built bbio for an ongoing device replace:
 *
 * - BTRFS_MAP_WRITE: duplicate each stripe that targets the source
 *   device so the write also reaches the target device.
 * - BTRFS_MAP_GET_READ_MIRRORS: append the target device as one extra
 *   mirror (for DUP, mirroring the lowest-physical source stripe).
 *
 * The updated stripe and max-error counts are returned through
 * @num_stripes_ret and @max_errors_ret; bbio->num_tgtdevs records how
 * many target-device stripes were added.
 */
static void handle_ops_on_dev_replace(enum btrfs_map_op op,
				      struct btrfs_bio **bbio_ret,
				      struct btrfs_dev_replace *dev_replace,
				      int *num_stripes_ret, int *max_errors_ret)
{
	struct btrfs_bio *bbio = *bbio_ret;
	u64 srcdev_devid = dev_replace->srcdev->devid;
	int tgtdev_indexes = 0;
	int num_stripes = *num_stripes_ret;
	int max_errors = *max_errors_ret;
	int i;
	if (op == BTRFS_MAP_WRITE) {
		int index_where_to_add;
		/*
		 * duplicate the write operations while the dev replace
		 * procedure is running. Since the copying of the old disk to
		 * the new disk takes place at run time while the filesystem is
		 * mounted writable, the regular write operations to the old
		 * disk have to be duplicated to go to the new disk as well.
		 *
		 * Note that device->missing is handled by the caller, and that
		 * the write to the old disk is already set up in the stripes
		 * array.
		 */
		index_where_to_add = num_stripes;
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/* write to new disk, too */
				struct btrfs_bio_stripe *new =
					bbio->stripes + index_where_to_add;
				struct btrfs_bio_stripe *old =
					bbio->stripes + i;
				new->physical = old->physical;
				new->length = old->length;
				new->dev = dev_replace->tgtdev;
				bbio->tgtdev_map[i] = index_where_to_add;
				index_where_to_add++;
				max_errors++;
				tgtdev_indexes++;
			}
		}
		num_stripes = index_where_to_add;
	} else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
		int index_srcdev = 0;
		int found = 0;
		u64 physical_of_found = 0;
		/*
		 * During the dev-replace procedure, the target drive can also
		 * be used to read data in case it is needed to repair a corrupt
		 * block elsewhere. This is possible if the requested area is
		 * left of the left cursor. In this area, the target drive is a
		 * full copy of the source drive.
		 */
		for (i = 0; i < num_stripes; i++) {
			if (bbio->stripes[i].dev->devid == srcdev_devid) {
				/*
				 * In case of DUP, in order to keep it simple,
				 * only add the mirror with the lowest physical
				 * address
				 */
				if (found &&
				    physical_of_found <=
				     bbio->stripes[i].physical)
					continue;
				index_srcdev = i;
				found = 1;
				physical_of_found = bbio->stripes[i].physical;
			}
		}
		if (found) {
			/* append the target device as the last mirror */
			struct btrfs_bio_stripe *tgtdev_stripe =
				bbio->stripes + num_stripes;
			tgtdev_stripe->physical = physical_of_found;
			tgtdev_stripe->length =
				bbio->stripes[index_srcdev].length;
			tgtdev_stripe->dev = dev_replace->tgtdev;
			bbio->tgtdev_map[index_srcdev] = num_stripes;
			tgtdev_indexes++;
			num_stripes++;
		}
	}
	*num_stripes_ret = num_stripes;
	*max_errors_ret = max_errors;
	bbio->num_tgtdevs = tgtdev_indexes;
	*bbio_ret = bbio;
}
static bool need_full_stripe(enum btrfs_map_op op)
{
return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
}
/*
 * Core logical-to-physical mapping routine.
 *
 * Translate @logical/@*length for operation @op into the stripe set that
 * covers it, allocating a bbio into *bbio_ret (when non-NULL) that the
 * caller must release with btrfs_put_bbio().  *length is clamped to what
 * a single mapping can cover.  @mirror_num selects a specific copy
 * (1-based, 0 = pick one); @need_raid_map asks for a raid_map to be
 * built for RAID5/6 full-stripe operations.  Device-replace handling
 * (duplicated writes, extra read mirror, first-stripe patching) is done
 * here under dev_replace->rwsem.
 */
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
			     enum btrfs_map_op op,
			     u64 logical, u64 *length,
			     struct btrfs_bio **bbio_ret,
			     int mirror_num, int need_raid_map)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 offset;
	u64 stripe_offset;
	u64 stripe_nr;
	u64 stripe_len;
	u32 stripe_index;
	int i;
	int ret = 0;
	int num_stripes;
	int max_errors = 0;
	int tgtdev_indexes = 0;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
	int dev_replace_is_ongoing = 0;
	int num_alloc_stripes;
	int patch_the_first_stripe_for_dev_replace = 0;
	u64 physical_to_patch_in_first_stripe = 0;
	u64 raid56_full_stripe_start = (u64)-1;
	/* discards have their own multi-stripe mapping path */
	if (op == BTRFS_MAP_DISCARD)
		return __btrfs_map_block_for_discard(fs_info, logical,
						     *length, bbio_ret);
	em = btrfs_get_chunk_map(fs_info, logical, *length);
	if (IS_ERR(em))
		return PTR_ERR(em);
	map = em->map_lookup;
	offset = logical - em->start;
	stripe_len = map->stripe_len;
	stripe_nr = offset;
	/*
	 * stripe_nr counts the total number of stripes we have to stride
	 * to get to this block
	 */
	stripe_nr = div64_u64(stripe_nr, stripe_len);
	stripe_offset = stripe_nr * stripe_len;
	if (offset < stripe_offset) {
		btrfs_crit(fs_info,
			   "stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
			   stripe_offset, offset, em->start, logical,
			   stripe_len);
		free_extent_map(em);
		return -EINVAL;
	}
	/* stripe_offset is the offset of this block in its stripe*/
	stripe_offset = offset - stripe_offset;
	/* if we're here for raid56, we need to know the stripe aligned start */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
		raid56_full_stripe_start = offset;
		/* allow a write of a full stripe, but make sure we don't
		 * allow straddling of stripes
		 */
		raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
				full_stripe_len);
		raid56_full_stripe_start *= full_stripe_len;
	}
	if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
		u64 max_len;
		/* For writes to RAID[56], allow a full stripeset across all disks.
		   For other RAID types and for RAID[56] reads, just allow a single
		   stripe (on a single disk). */
		if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
		    (op == BTRFS_MAP_WRITE)) {
			max_len = stripe_len * nr_data_stripes(map) -
				(offset - raid56_full_stripe_start);
		} else {
			/* we limit the length of each bio to what fits in a stripe */
			max_len = stripe_len - stripe_offset;
		}
		*length = min_t(u64, em->len - offset, max_len);
	} else {
		*length = em->len - offset;
	}
	/*
	 * This is for when we're called from btrfs_bio_fits_in_stripe and all
	 * it cares about is the length
	 */
	if (!bbio_ret)
		goto out;
	down_read(&dev_replace->rwsem);
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
	/*
	 * Hold the semaphore for read during the whole operation, write is
	 * requested at commit time but must wait.
	 */
	if (!dev_replace_is_ongoing)
		up_read(&dev_replace->rwsem);
	if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
	    !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
		/* repair read explicitly addressed at the replace target */
		ret = get_extra_mirror_from_replace(fs_info, logical, *length,
						    dev_replace->srcdev->devid,
						    &mirror_num,
					    &physical_to_patch_in_first_stripe);
		if (ret)
			goto out;
		else
			patch_the_first_stripe_for_dev_replace = 1;
	} else if (mirror_num > map->num_stripes) {
		mirror_num = 0;
	}
	/* per-profile computation of num_stripes / stripe_index / mirror_num */
	num_stripes = 1;
	stripe_index = 0;
	if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		if (!need_full_stripe(op))
			mirror_num = 1;
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
		if (need_full_stripe(op))
			num_stripes = map->num_stripes;
		else if (mirror_num)
			stripe_index = mirror_num - 1;
		else {
			stripe_index = find_live_mirror(fs_info, map, 0,
					    dev_replace_is_ongoing);
			mirror_num = stripe_index + 1;
		}
	} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
		if (need_full_stripe(op)) {
			num_stripes = map->num_stripes;
		} else if (mirror_num) {
			stripe_index = mirror_num - 1;
		} else {
			mirror_num = 1;
		}
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
		u32 factor = map->num_stripes / map->sub_stripes;
		stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
		stripe_index *= map->sub_stripes;
		if (need_full_stripe(op))
			num_stripes = map->sub_stripes;
		else if (mirror_num)
			stripe_index += mirror_num - 1;
		else {
			int old_stripe_index = stripe_index;
			stripe_index = find_live_mirror(fs_info, map,
					      stripe_index,
					      dev_replace_is_ongoing);
			mirror_num = stripe_index - old_stripe_index + 1;
		}
	} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
			/* push stripe_nr back to the start of the full stripe */
			stripe_nr = div64_u64(raid56_full_stripe_start,
					stripe_len * nr_data_stripes(map));
			/* RAID[56] write or recovery. Return all stripes */
			num_stripes = map->num_stripes;
			max_errors = nr_parity_stripes(map);
			*length = map->stripe_len;
			stripe_index = 0;
			stripe_offset = 0;
		} else {
			/*
			 * Mirror #0 or #1 means the original data block.
			 * Mirror #2 is RAID5 parity block.
			 * Mirror #3 is RAID6 Q block.
			 */
			stripe_nr = div_u64_rem(stripe_nr,
					nr_data_stripes(map), &stripe_index);
			if (mirror_num > 1)
				stripe_index = nr_data_stripes(map) +
						mirror_num - 2;
			/* We distribute the parity blocks across stripes */
			div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
					&stripe_index);
			if (!need_full_stripe(op) && mirror_num <= 1)
				mirror_num = 1;
		}
	} else {
		/*
		 * after this, stripe_nr is the number of stripes on this
		 * device we have to walk to find the data, and stripe_index is
		 * the number of our device in the stripe array
		 */
		stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
				&stripe_index);
		mirror_num = stripe_index + 1;
	}
	if (stripe_index >= map->num_stripes) {
		btrfs_crit(fs_info,
			   "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
			   stripe_index, map->num_stripes);
		ret = -EINVAL;
		goto out;
	}
	num_alloc_stripes = num_stripes;
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
		/* room for duplicated writes / the extra read mirror */
		if (op == BTRFS_MAP_WRITE)
			num_alloc_stripes <<= 1;
		if (op == BTRFS_MAP_GET_READ_MIRRORS)
			num_alloc_stripes++;
		tgtdev_indexes = num_stripes;
	}
	bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
	if (!bbio) {
		ret = -ENOMEM;
		goto out;
	}
	/* tgtdev_map lives right behind the stripes array */
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL)
		bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
	/* build raid_map */
	if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
	    (need_full_stripe(op) || mirror_num > 1)) {
		u64 tmp;
		unsigned rot;
		bbio->raid_map = (u64 *)((void *)bbio->stripes +
				 sizeof(struct btrfs_bio_stripe) *
				 num_alloc_stripes +
				 sizeof(int) * tgtdev_indexes);
		/* Work out the disk rotation on this stripe-set */
		div_u64_rem(stripe_nr, num_stripes, &rot);
		/* Fill in the logical address of each stripe */
		tmp = stripe_nr * nr_data_stripes(map);
		for (i = 0; i < nr_data_stripes(map); i++)
			bbio->raid_map[(i+rot) % num_stripes] =
				em->start + (tmp + i) * map->stripe_len;
		/* parity (and RAID6 Q) stripes get sentinel keys */
		bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
		if (map->type & BTRFS_BLOCK_GROUP_RAID6)
			bbio->raid_map[(i+rot+1) % num_stripes] =
				RAID6_Q_STRIPE;
	}
	for (i = 0; i < num_stripes; i++) {
		bbio->stripes[i].physical =
			map->stripes[stripe_index].physical +
			stripe_offset +
			stripe_nr * map->stripe_len;
		bbio->stripes[i].dev =
			map->stripes[stripe_index].dev;
		stripe_index++;
	}
	if (need_full_stripe(op))
		max_errors = btrfs_chunk_max_errors(map);
	if (bbio->raid_map)
		sort_parity_stripes(bbio, num_stripes);
	if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
	    need_full_stripe(op)) {
		handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
					  &max_errors);
	}
	*bbio_ret = bbio;
	bbio->map_type = map->type;
	bbio->num_stripes = num_stripes;
	bbio->max_errors = max_errors;
	bbio->mirror_num = mirror_num;
	/*
	 * this is the case that REQ_READ && dev_replace_is_ongoing &&
	 * mirror_num == num_stripes + 1 && dev_replace target drive is
	 * available as a mirror
	 */
	if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
		WARN_ON(num_stripes > 1);
		bbio->stripes[0].dev = dev_replace->tgtdev;
		bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
		bbio->mirror_num = map->num_stripes + 1;
	}
out:
	if (dev_replace_is_ongoing) {
		lockdep_assert_held(&dev_replace->rwsem);
		/* Unlock and let waiting writers proceed */
		up_read(&dev_replace->rwsem);
	}
	free_extent_map(em);
	return ret;
}
/*
 * Public mapping entry point for the regular read/write/discard paths.
 * Same as __btrfs_map_block() but never builds a RAID5/6 raid_map.
 */
int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		    u64 logical, u64 *length,
		    struct btrfs_bio **bbio_ret, int mirror_num)
{
	const int need_raid_map = 0;

	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
				 mirror_num, need_raid_map);
}
/* For Scrub/replace */
int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
		     u64 logical, u64 *length,
		     struct btrfs_bio **bbio_ret)
{
	/* mirror_num 0; always ask for a raid_map so RAID5/6 can repair */
	return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
}
/*
 * Reverse-map a physical address inside a chunk to the logical
 * address(es) that reference it.
 *
 * @chunk_start: logical start of the chunk to search
 * @physical:    physical address to map back
 * @logical:     on success, a kcalloc'ed array of logical addresses
 *               (the caller must free it)
 * @naddrs:      number of entries stored in *@logical
 * @stripe_len:  length covered by each returned logical address
 *
 * Returns 0 on success, -EIO when the chunk map cannot be found, or
 * -ENOMEM on allocation failure.
 */
int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
		     u64 physical, u64 **logical, int *naddrs, int *stripe_len)
{
	struct extent_map *em;
	struct map_lookup *map;
	u64 *buf;
	u64 bytenr;
	u64 length;
	u64 stripe_nr;
	u64 rmap_len;
	int i, j, nr = 0;
	em = btrfs_get_chunk_map(fs_info, chunk_start, 1);
	if (IS_ERR(em))
		return -EIO;
	map = em->map_lookup;
	length = em->len;
	rmap_len = map->stripe_len;
	/* the per-device length depends on the profile's striping factor */
	if (map->type & BTRFS_BLOCK_GROUP_RAID10)
		length = div_u64(length, map->num_stripes / map->sub_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
		length = div_u64(length, map->num_stripes);
	else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
		length = div_u64(length, nr_data_stripes(map));
		rmap_len = map->stripe_len * nr_data_stripes(map);
	}
	/*
	 * Allocation failure is recoverable here: return -ENOMEM instead
	 * of crashing the machine with a BUG_ON.
	 */
	buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS);
	if (!buf) {
		free_extent_map(em);
		return -ENOMEM;
	}
	for (i = 0; i < map->num_stripes; i++) {
		/* skip devices whose stripe does not cover @physical */
		if (map->stripes[i].physical > physical ||
		    map->stripes[i].physical + length <= physical)
			continue;
		stripe_nr = physical - map->stripes[i].physical;
		stripe_nr = div64_u64(stripe_nr, map->stripe_len);
		if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
			stripe_nr = stripe_nr * map->num_stripes + i;
			stripe_nr = div_u64(stripe_nr, map->sub_stripes);
		} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
			stripe_nr = stripe_nr * map->num_stripes + i;
		} /* else if RAID[56], multiply by nr_data_stripes().
		   * Alternatively, just use rmap_len below instead of
		   * map->stripe_len */
		bytenr = chunk_start + stripe_nr * rmap_len;
		WARN_ON(nr >= map->num_stripes);
		/* deduplicate: several mirrors map to the same logical */
		for (j = 0; j < nr; j++) {
			if (buf[j] == bytenr)
				break;
		}
		if (j == nr) {
			WARN_ON(nr >= map->num_stripes);
			buf[nr++] = bytenr;
		}
	}
	*logical = buf;
	*naddrs = nr;
	*stripe_len = rmap_len;
	free_extent_map(em);
	return 0;
}
/*
 * Restore the submitter's original completion callback and private data
 * on @bio, signal completion to the upper layer, then drop the bbio
 * reference held for the in-flight I/O.
 */
static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
{
	bio->bi_end_io = bbio->end_io;
	bio->bi_private = bbio->private;
	bio_endio(bio);
	btrfs_put_bbio(bbio);
}
/*
 * Per-stripe bio completion handler.  Records device error statistics on
 * failure and, once the last pending stripe of the bbio completes,
 * finishes the original bio with OK/IOERR depending on whether the error
 * count exceeded bbio->max_errors.
 */
static void btrfs_end_bio(struct bio *bio)
{
	struct btrfs_bio *bbio = bio->bi_private;
	int is_orig_bio = 0;
	if (bio->bi_status) {
		atomic_inc(&bbio->error);
		if (bio->bi_status == BLK_STS_IOERR ||
		    bio->bi_status == BLK_STS_TARGET) {
			unsigned int stripe_index =
				btrfs_io_bio(bio)->stripe_index;
			struct btrfs_device *dev;
			BUG_ON(stripe_index >= bbio->num_stripes);
			dev = bbio->stripes[stripe_index].dev;
			if (dev->bdev) {
				/* bump the matching per-device error counter */
				if (bio_op(bio) == REQ_OP_WRITE)
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_WRITE_ERRS);
				else
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_READ_ERRS);
				if (bio->bi_opf & REQ_PREFLUSH)
					btrfs_dev_stat_inc_and_print(dev,
						BTRFS_DEV_STAT_FLUSH_ERRS);
			}
		}
	}
	if (bio == bbio->orig_bio)
		is_orig_bio = 1;
	btrfs_bio_counter_dec(bbio->fs_info);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* last stripe: report completion on the original bio */
		if (!is_orig_bio) {
			bio_put(bio);
			bio = bbio->orig_bio;
		}
		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		/* only send an error to the higher layers if it is
		 * beyond the tolerance of the btrfs bio
		 */
		if (atomic_read(&bbio->error) > bbio->max_errors) {
			bio->bi_status = BLK_STS_IOERR;
		} else {
			/*
			 * this bio is actually up to date, we didn't
			 * go over the max number of errors
			 */
			bio->bi_status = BLK_STS_OK;
		}
		btrfs_end_bbio(bbio, bio);
	} else if (!is_orig_bio) {
		/* cloned stripe bio is done, other stripes still pending */
		bio_put(bio);
	}
}
/*
 * see run_scheduled_bios for a description of why bios are collected for
 * async submit.
 *
 * This will add one bio to the pending list for a device and make sure
 * the work struct is scheduled.
 */
static noinline void btrfs_schedule_bio(struct btrfs_device *device,
					struct bio *bio)
{
	struct btrfs_fs_info *fs_info = device->fs_info;
	int should_queue = 1;
	struct btrfs_pending_bios *pending_bios;
	/* don't bother with additional async steps for reads, right now */
	if (bio_op(bio) == REQ_OP_READ) {
		btrfsic_submit_bio(bio);
		return;
	}
	WARN_ON(bio->bi_next);
	bio->bi_next = NULL;
	spin_lock(&device->io_lock);
	/* sync writes go on their own list so they can be prioritized */
	if (op_is_sync(bio->bi_opf))
		pending_bios = &device->pending_sync_bios;
	else
		pending_bios = &device->pending_bios;
	/* append to the tail of the singly-linked pending list */
	if (pending_bios->tail)
		pending_bios->tail->bi_next = bio;
	pending_bios->tail = bio;
	if (!pending_bios->head)
		pending_bios->head = bio;
	/* the worker is already draining the list; no need to requeue it */
	if (device->running_pending)
		should_queue = 0;
	spin_unlock(&device->io_lock);
	if (should_queue)
		btrfs_queue_work(fs_info->submit_workers, &device->work);
}
/*
 * Point @bio at stripe @dev_nr of @bbio (device and physical sector) and
 * submit it, either via the per-device async queue (@async) or directly.
 * Completion is routed through btrfs_end_bio().
 */
static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
			      u64 physical, int dev_nr, int async)
{
	struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
	struct btrfs_fs_info *fs_info = bbio->fs_info;
	bio->bi_private = bbio;
	/* remember which stripe this bio belongs to, for error accounting */
	btrfs_io_bio(bio)->stripe_index = dev_nr;
	bio->bi_end_io = btrfs_end_bio;
	bio->bi_iter.bi_sector = physical >> 9;
	btrfs_debug_in_rcu(fs_info,
	"btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
		bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
		(u_long)dev->bdev->bd_dev, rcu_str_deref(dev->name), dev->devid,
		bio->bi_iter.bi_size);
	bio_set_dev(bio, dev->bdev);
	btrfs_bio_counter_inc_noblocked(fs_info);
	if (async)
		btrfs_schedule_bio(dev, bio);
	else
		btrfsic_submit_bio(bio);
}
/*
 * Account a stripe that could not be submitted at all (e.g. missing or
 * read-only device).  If it was the last pending stripe, complete the
 * original bio, honoring bbio->max_errors just like btrfs_end_bio().
 */
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
{
	atomic_inc(&bbio->error);
	if (atomic_dec_and_test(&bbio->stripes_pending)) {
		/* Should be the original bio. */
		WARN_ON(bio != bbio->orig_bio);
		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
		bio->bi_iter.bi_sector = logical >> 9;
		if (atomic_read(&bbio->error) > bbio->max_errors)
			bio->bi_status = BLK_STS_IOERR;
		else
			bio->bi_status = BLK_STS_OK;
		btrfs_end_bbio(bbio, bio);
	}
}
/*
 * Map @bio to its physical stripes and submit one (possibly cloned) bio
 * per stripe.  RAID5/6 writes and repair reads are handed off to the
 * raid56 code instead.  Completion is reported through the original
 * bio's end_io once all stripes finish.
 */
blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
			   int mirror_num, int async_submit)
{
	struct btrfs_device *dev;
	struct bio *first_bio = bio;
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;
	int dev_nr;
	int total_devs;
	struct btrfs_bio *bbio = NULL;
	length = bio->bi_iter.bi_size;
	map_length = length;
	btrfs_bio_counter_inc_blocked(fs_info);
	ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
				&map_length, &bbio, mirror_num, 1);
	if (ret) {
		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}
	total_devs = bbio->num_stripes;
	/* stash the submitter's completion info; restored in btrfs_end_bbio */
	bbio->orig_bio = first_bio;
	bbio->private = first_bio->bi_private;
	bbio->end_io = first_bio->bi_end_io;
	bbio->fs_info = fs_info;
	atomic_set(&bbio->stripes_pending, bbio->num_stripes);
	if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
	    ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
		/* In this case, map_length has been set to the length of
		   a single stripe; not the whole write */
		if (bio_op(bio) == REQ_OP_WRITE) {
			ret = raid56_parity_write(fs_info, bio, bbio,
						  map_length);
		} else {
			ret = raid56_parity_recover(fs_info, bio, bbio,
						    map_length, mirror_num, 1);
		}
		btrfs_bio_counter_dec(fs_info);
		return errno_to_blk_status(ret);
	}
	if (map_length < length) {
		btrfs_crit(fs_info,
			   "mapping failed logical %llu bio len %llu len %llu",
			   logical, length, map_length);
		BUG();
	}
	for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
		dev = bbio->stripes[dev_nr].dev;
		/* skip stripes we cannot submit to, but account the error */
		if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
						   &dev->dev_state) ||
		    (bio_op(first_bio) == REQ_OP_WRITE &&
		    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
			bbio_error(bbio, first_bio, logical);
			continue;
		}
		/* clone for all stripes but the last; it reuses first_bio */
		if (dev_nr < total_devs - 1)
			bio = btrfs_bio_clone(first_bio);
		else
			bio = first_bio;
		submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical,
				  dev_nr, async_submit);
	}
	btrfs_bio_counter_dec(fs_info);
	return BLK_STS_OK;
}
/*
 * Find a device specified by @devid or @uuid in the list of @fs_devices, or
 * return NULL.
 *
 * If devid and uuid are both specified, the match must be exact, otherwise
 * only devid is used.
 *
 * If @seed is true, traverse through the seed devices.
 */
struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
				       u64 devid, u8 *uuid, u8 *fsid,
				       bool seed)
{
	for (; fs_devices; fs_devices = seed ? fs_devices->seed : NULL) {
		struct btrfs_device *dev;

		/* when an fsid is given, only search matching filesystems */
		if (fsid && memcmp(fs_devices->metadata_uuid, fsid,
				   BTRFS_FSID_SIZE) != 0)
			continue;

		list_for_each_entry(dev, &fs_devices->devices, dev_list) {
			if (dev->devid != devid)
				continue;
			if (uuid && memcmp(dev->uuid, uuid,
					   BTRFS_UUID_SIZE) != 0)
				continue;
			return dev;
		}
	}
	return NULL;
}
/*
 * Create a stub btrfs_device for a devid that is referenced by metadata
 * but is not currently present, link it into @fs_devices and account it
 * as missing.  Used for degraded mounts.
 */
static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
					    u64 devid, u8 *dev_uuid)
{
	struct btrfs_device *device;
	device = btrfs_alloc_device(NULL, &devid, dev_uuid);
	if (IS_ERR(device))
		return device;
	list_add(&device->dev_list, &fs_devices->devices);
	device->fs_devices = fs_devices;
	fs_devices->num_devices++;
	set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
	fs_devices->missing_devices++;
	return device;
}
/**
 * btrfs_alloc_device - allocate struct btrfs_device
 * @fs_info:	used only for generating a new devid, can be NULL if
 *		devid is provided (i.e. @devid != NULL).
 * @devid:	a pointer to devid for this device.  If NULL a new devid
 *		is generated.
 * @uuid:	a pointer to UUID for this device.  If NULL a new UUID
 *		is generated.
 *
 * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
 * on error.  Returned struct is not linked onto any lists and must be
 * destroyed with btrfs_free_device.
 */
struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
					const u64 *devid,
					const u8 *uuid)
{
	struct btrfs_device *dev;
	u64 new_devid;

	/* without a devid we need fs_info to generate one */
	if (WARN_ON(!devid && !fs_info))
		return ERR_PTR(-EINVAL);

	dev = __alloc_device();
	if (IS_ERR(dev))
		return dev;

	if (!devid) {
		int ret = find_next_devid(fs_info, &new_devid);

		if (ret) {
			btrfs_free_device(dev);
			return ERR_PTR(ret);
		}
	} else {
		new_devid = *devid;
	}
	dev->devid = new_devid;

	if (!uuid)
		generate_random_uuid(dev->uuid);
	else
		memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);

	btrfs_init_work(&dev->work, btrfs_submit_helper,
			pending_bios_fn, NULL, NULL);
	return dev;
}
/*
 * Validate one on-disk chunk item before it is turned into an in-memory
 * mapping: alignment, stripe geometry, type flags and per-profile
 * stripe counts.  Return -EIO if any error, otherwise return 0.
 */
static int btrfs_check_chunk_valid(struct btrfs_fs_info *fs_info,
				   struct extent_buffer *leaf,
				   struct btrfs_chunk *chunk, u64 logical)
{
	u64 length;
	u64 stripe_len;
	u16 num_stripes;
	u16 sub_stripes;
	u64 type;
	u64 features;
	bool mixed = false;
	length = btrfs_chunk_length(leaf, chunk);
	stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	type = btrfs_chunk_type(leaf, chunk);
	if (!num_stripes) {
		btrfs_err(fs_info, "invalid chunk num_stripes: %u",
			  num_stripes);
		return -EIO;
	}
	if (!IS_ALIGNED(logical, fs_info->sectorsize)) {
		btrfs_err(fs_info, "invalid chunk logical %llu", logical);
		return -EIO;
	}
	if (btrfs_chunk_sector_size(leaf, chunk) != fs_info->sectorsize) {
		btrfs_err(fs_info, "invalid chunk sectorsize %u",
			  btrfs_chunk_sector_size(leaf, chunk));
		return -EIO;
	}
	if (!length || !IS_ALIGNED(length, fs_info->sectorsize)) {
		btrfs_err(fs_info, "invalid chunk length %llu", length);
		return -EIO;
	}
	if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
		btrfs_err(fs_info, "invalid chunk stripe length: %llu",
			  stripe_len);
		return -EIO;
	}
	/* reject any flag bits outside the known type/profile masks */
	if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) &
	    type) {
		btrfs_err(fs_info, "unrecognized chunk type: %llu",
			  ~(BTRFS_BLOCK_GROUP_TYPE_MASK |
			    BTRFS_BLOCK_GROUP_PROFILE_MASK) &
			  btrfs_chunk_type(leaf, chunk));
		return -EIO;
	}
	if ((type & BTRFS_BLOCK_GROUP_TYPE_MASK) == 0) {
		btrfs_err(fs_info, "missing chunk type flag: 0x%llx", type);
		return -EIO;
	}
	if ((type & BTRFS_BLOCK_GROUP_SYSTEM) &&
	    (type & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA))) {
		btrfs_err(fs_info,
			"system chunk with data or metadata type: 0x%llx", type);
		return -EIO;
	}
	features = btrfs_super_incompat_flags(fs_info->super_copy);
	if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
		mixed = true;
	/* data+metadata in one chunk is only valid with mixed block groups */
	if (!mixed) {
		if ((type & BTRFS_BLOCK_GROUP_METADATA) &&
		    (type & BTRFS_BLOCK_GROUP_DATA)) {
			btrfs_err(fs_info,
			"mixed chunk type in non-mixed mode: 0x%llx", type);
			return -EIO;
		}
	}
	/* per-profile minimum/maximum stripe counts */
	if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) ||
	    (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) ||
	    (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) ||
	    (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) ||
	    ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 &&
	     num_stripes != 1)) {
		btrfs_err(fs_info,
			"invalid num_stripes:sub_stripes %u:%u for profile %llu",
			num_stripes, sub_stripes,
			type & BTRFS_BLOCK_GROUP_PROFILE_MASK);
		return -EIO;
	}
	return 0;
}
/*
 * Log a missing device, ratelimited.  @error selects error vs warning
 * severity (hard failure vs degraded mount).
 */
static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
					u64 devid, u8 *uuid, bool error)
{
	if (!error)
		btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
			      devid, uuid);
	else
		btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
			     devid, uuid);
}
/*
 * Build the in-memory chunk mapping (extent_map + map_lookup) for one
 * on-disk chunk item and insert it into the mapping tree.  Chunks that
 * are already mapped are skipped.  Missing devices either fail with
 * -ENOENT or get stub entries when mounted with -o degraded.
 */
static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
			  struct extent_buffer *leaf,
			  struct btrfs_chunk *chunk)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct map_lookup *map;
	struct extent_map *em;
	u64 logical;
	u64 length;
	u64 devid;
	u8 uuid[BTRFS_UUID_SIZE];
	int num_stripes;
	int ret;
	int i;
	logical = key->offset;
	length = btrfs_chunk_length(leaf, chunk);
	num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
	/* validate the on-disk item before trusting any of its fields */
	ret = btrfs_check_chunk_valid(fs_info, leaf, chunk, logical);
	if (ret)
		return ret;
	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
	read_unlock(&map_tree->map_tree.lock);
	/* already mapped? */
	if (em && em->start <= logical && em->start + em->len > logical) {
		free_extent_map(em);
		return 0;
	} else if (em) {
		free_extent_map(em);
	}
	em = alloc_extent_map();
	if (!em)
		return -ENOMEM;
	map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
	if (!map) {
		free_extent_map(em);
		return -ENOMEM;
	}
	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
	em->map_lookup = map;
	em->start = logical;
	em->len = length;
	em->orig_start = 0;
	em->block_start = 0;
	em->block_len = em->len;
	map->num_stripes = num_stripes;
	map->io_width = btrfs_chunk_io_width(leaf, chunk);
	map->io_align = btrfs_chunk_io_align(leaf, chunk);
	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
	map->type = btrfs_chunk_type(leaf, chunk);
	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
	map->verified_stripes = 0;
	for (i = 0; i < num_stripes; i++) {
		map->stripes[i].physical =
			btrfs_stripe_offset_nr(leaf, chunk, i);
		devid = btrfs_stripe_devid_nr(leaf, chunk, i);
		read_extent_buffer(leaf, uuid, (unsigned long)
				   btrfs_stripe_dev_uuid_nr(chunk, i),
				   BTRFS_UUID_SIZE);
		map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
							devid, uuid, NULL, true);
		/* device not present: fail, unless mounted degraded */
		if (!map->stripes[i].dev &&
		    !btrfs_test_opt(fs_info, DEGRADED)) {
			free_extent_map(em);
			btrfs_report_missing_device(fs_info, devid, uuid, true);
			return -ENOENT;
		}
		if (!map->stripes[i].dev) {
			map->stripes[i].dev =
				add_missing_dev(fs_info->fs_devices, devid,
						uuid);
			if (IS_ERR(map->stripes[i].dev)) {
				free_extent_map(em);
				btrfs_err(fs_info,
					"failed to init missing dev %llu: %ld",
					devid, PTR_ERR(map->stripes[i].dev));
				return PTR_ERR(map->stripes[i].dev);
			}
			btrfs_report_missing_device(fs_info, devid, uuid, false);
		}
		set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
				&(map->stripes[i].dev->dev_state));
	}
	write_lock(&map_tree->map_tree.lock);
	ret = add_extent_mapping(&map_tree->map_tree, em, 0);
	write_unlock(&map_tree->map_tree.lock);
	if (ret < 0) {
		btrfs_err(fs_info,
			  "failed to add chunk map, start=%llu len=%llu: %d",
			  em->start, em->len, ret);
	}
	free_extent_map(em);
	return ret;
}
/*
 * Populate an in-memory btrfs_device from an on-disk dev item in @leaf.
 * The on-disk sizes also seed total_bytes and the commit_* shadow copies.
 */
static void fill_device_from_item(struct extent_buffer *leaf,
				  struct btrfs_dev_item *dev_item,
				  struct btrfs_device *device)
{
	unsigned long ptr;
	device->devid = btrfs_device_id(leaf, dev_item);
	device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
	device->total_bytes = device->disk_total_bytes;
	device->commit_total_bytes = device->disk_total_bytes;
	device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
	device->commit_bytes_used = device->bytes_used;
	device->type = btrfs_device_type(leaf, dev_item);
	device->io_align = btrfs_device_io_align(leaf, dev_item);
	device->io_width = btrfs_device_io_width(leaf, dev_item);
	device->sector_size = btrfs_device_sector_size(leaf, dev_item);
	/* replace targets are never stored as dev items on disk */
	WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
	clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
	ptr = btrfs_device_uuid(dev_item);
	read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
}
/*
 * Find or create the btrfs_fs_devices for the seed filesystem identified by
 * @fsid and link it into fs_info's seed chain.
 *
 * Must be called with uuid_mutex held.  Returns the fs_devices on success
 * or an ERR_PTR: -ENOENT when the fsid is unknown and the mount is not
 * degraded, -EINVAL when the located devices are not marked as seeding,
 * or an error propagated from the helpers.
 */
static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
						  u8 *fsid)
{
	struct btrfs_fs_devices *fs_devices;
	int ret;

	lockdep_assert_held(&uuid_mutex);
	ASSERT(fsid);

	/* Already linked as a seed of this filesystem?  Reuse it. */
	fs_devices = fs_info->fs_devices->seed;
	while (fs_devices) {
		if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
			return fs_devices;

		fs_devices = fs_devices->seed;
	}

	fs_devices = find_fsid(fsid, NULL);
	if (!fs_devices) {
		/* Unknown seed fsid is only tolerable on a degraded mount. */
		if (!btrfs_test_opt(fs_info, DEGRADED))
			return ERR_PTR(-ENOENT);

		/* Create an empty placeholder for the missing seed. */
		fs_devices = alloc_fs_devices(fsid, NULL);
		if (IS_ERR(fs_devices))
			return fs_devices;

		fs_devices->seeding = 1;
		fs_devices->opened = 1;
		return fs_devices;
	}

	/* Work on a clone so the global list entry stays untouched. */
	fs_devices = clone_fs_devices(fs_devices);
	if (IS_ERR(fs_devices))
		return fs_devices;

	ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
	if (ret) {
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(ret);
		goto out;
	}

	/* The devices must still carry the seeding flag to back a sprout. */
	if (!fs_devices->seeding) {
		close_fs_devices(fs_devices);
		free_fs_devices(fs_devices);
		fs_devices = ERR_PTR(-EINVAL);
		goto out;
	}

	/* Push the freshly opened seed onto the head of the seed chain. */
	fs_devices->seed = fs_info->fs_devices->seed;
	fs_info->fs_devices->seed = fs_devices;
out:
	return fs_devices;
}
/*
 * Process one DEV_ITEM from the chunk tree: locate (or create a stand-in
 * for) the matching btrfs_device and refresh it from the on-disk item.
 *
 * Returns 0 on success, -ENOENT when a device is missing and the mount is
 * not degraded, -EINVAL on a seed generation mismatch, or another negative
 * errno from the helpers.
 */
static int read_one_dev(struct btrfs_fs_info *fs_info,
			struct extent_buffer *leaf,
			struct btrfs_dev_item *dev_item)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	u64 devid;
	int ret;
	u8 fs_uuid[BTRFS_FSID_SIZE];
	u8 dev_uuid[BTRFS_UUID_SIZE];

	devid = btrfs_device_id(leaf, dev_item);
	read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
			   BTRFS_UUID_SIZE);
	read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
			   BTRFS_FSID_SIZE);

	/* A foreign fsid means this item describes a seed device. */
	if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
		fs_devices = open_seed_devices(fs_info, fs_uuid);
		if (IS_ERR(fs_devices))
			return PTR_ERR(fs_devices);
	}

	device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
				   fs_uuid, true);
	if (!device) {
		/* Unknown device: fatal unless mounted degraded. */
		if (!btrfs_test_opt(fs_info, DEGRADED)) {
			btrfs_report_missing_device(fs_info, devid,
							dev_uuid, true);
			return -ENOENT;
		}

		device = add_missing_dev(fs_devices, devid, dev_uuid);
		if (IS_ERR(device)) {
			btrfs_err(fs_info,
				"failed to add missing dev %llu: %ld",
				devid, PTR_ERR(device));
			return PTR_ERR(device);
		}
		btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
	} else {
		/* Known device without an opened bdev: degraded only. */
		if (!device->bdev) {
			if (!btrfs_test_opt(fs_info, DEGRADED)) {
				btrfs_report_missing_device(fs_info,
						devid, dev_uuid, true);
				return -ENOENT;
			}
			btrfs_report_missing_device(fs_info, devid,
							dev_uuid, false);
		}

		if (!device->bdev &&
		    !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
			/*
			 * this happens when a device that was properly setup
			 * in the device info lists suddenly goes bad.
			 * device->bdev is NULL, and so we have to set
			 * device->missing to one here
			 */
			device->fs_devices->missing_devices++;
			set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
		}

		/* Move the device to its own fs_devices */
		if (device->fs_devices != fs_devices) {
			ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
							&device->dev_state));

			list_move(&device->dev_list, &fs_devices->devices);
			device->fs_devices->num_devices--;
			fs_devices->num_devices++;

			device->fs_devices->missing_devices--;
			fs_devices->missing_devices++;

			device->fs_devices = fs_devices;
		}
	}

	/* Devices from a foreign (seed) fs_devices: read-only, same gen. */
	if (device->fs_devices != fs_info->fs_devices) {
		BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
		if (device->generation !=
		    btrfs_device_generation(leaf, dev_item))
			return -EINVAL;
	}

	fill_device_from_item(leaf, dev_item, device);
	set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);

	/* Account writable, non-replace-target space as allocatable. */
	if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
	   !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
		device->fs_devices->total_rw_bytes += device->total_bytes;
		atomic64_add(device->total_bytes - device->bytes_used,
				&fs_info->free_chunk_space);
	}
	ret = 0;
	return ret;
}
/*
 * Parse the superblock's embedded sys_chunk_array and insert the SYSTEM
 * chunk mappings it contains into the chunk mapping tree.  The superblock
 * is copied into a temporary extent buffer so the regular extent-buffer
 * accessors can be used on the array.
 *
 * Returns 0 on success, -EIO for a malformed array, or a negative errno
 * propagated from read_one_chunk().
 */
int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->tree_root;
	struct btrfs_super_block *super_copy = fs_info->super_copy;
	struct extent_buffer *sb;
	struct btrfs_disk_key *disk_key;
	struct btrfs_chunk *chunk;
	u8 *array_ptr;
	unsigned long sb_array_offset;
	int ret = 0;
	u32 num_stripes;
	u32 array_size;
	u32 len = 0;
	u32 cur_offset;
	u64 type;
	struct btrfs_key key;

	ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
	/*
	 * This will create extent buffer of nodesize, superblock size is
	 * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
	 * overallocate but we can keep it as-is, only the first page is used.
	 */
	sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
	if (IS_ERR(sb))
		return PTR_ERR(sb);
	set_extent_buffer_uptodate(sb);
	btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
	/*
	 * The sb extent buffer is artificial and just used to read the system array.
	 * set_extent_buffer_uptodate() call does not properly mark all it's
	 * pages up-to-date when the page is larger: extent does not cover the
	 * whole page and consequently check_page_uptodate does not find all
	 * the page's extents up-to-date (the hole beyond sb),
	 * write_extent_buffer then triggers a WARN_ON.
	 *
	 * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
	 * but sb spans only this function. Add an explicit SetPageUptodate call
	 * to silence the warning eg. on PowerPC 64.
	 */
	if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
		SetPageUptodate(sb->pages[0]);

	/* Copy the superblock in so the extent-buffer accessors work on it. */
	write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
	array_size = btrfs_super_sys_array_size(super_copy);

	/*
	 * array_ptr walks the in-memory copy while sb_array_offset tracks
	 * the matching byte offset inside the sb extent buffer; both are
	 * advanced in lock-step below.
	 */
	array_ptr = super_copy->sys_chunk_array;
	sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
	cur_offset = 0;

	while (cur_offset < array_size) {
		disk_key = (struct btrfs_disk_key *)array_ptr;
		len = sizeof(*disk_key);
		if (cur_offset + len > array_size)
			goto out_short_read;

		btrfs_disk_key_to_cpu(&key, disk_key);

		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;

		if (key.type == BTRFS_CHUNK_ITEM_KEY) {
			/*
			 * "chunk" is really the byte offset inside sb; every
			 * access below goes through the (sb, chunk) accessors.
			 */
			chunk = (struct btrfs_chunk *)sb_array_offset;
			/*
			 * At least one btrfs_chunk with one stripe must be
			 * present, exact stripe count check comes afterwards
			 */
			len = btrfs_chunk_item_size(1);
			if (cur_offset + len > array_size)
				goto out_short_read;

			num_stripes = btrfs_chunk_num_stripes(sb, chunk);
			if (!num_stripes) {
				btrfs_err(fs_info,
			"invalid number of stripes %u in sys_array at offset %u",
					num_stripes, cur_offset);
				ret = -EIO;
				break;
			}

			type = btrfs_chunk_type(sb, chunk);
			if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
				btrfs_err(fs_info,
			"invalid chunk type %llu in sys_array at offset %u",
					type, cur_offset);
				ret = -EIO;
				break;
			}

			/* Re-validate the length with the real stripe count. */
			len = btrfs_chunk_item_size(num_stripes);
			if (cur_offset + len > array_size)
				goto out_short_read;

			ret = read_one_chunk(fs_info, &key, sb, chunk);
			if (ret)
				break;
		} else {
			btrfs_err(fs_info,
			"unexpected item type %u in sys_array at offset %u",
				(u32)key.type, cur_offset);
			ret = -EIO;
			break;
		}
		array_ptr += len;
		sb_array_offset += len;
		cur_offset += len;
	}
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return ret;

out_short_read:
	btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
			len, cur_offset);
	clear_extent_buffer_uptodate(sb);
	free_extent_buffer_stale(sb);
	return -EIO;
}
/*
* Check if all chunks in the fs are OK for read-write degraded mount
*
* If the @failing_dev is specified, it's accounted as missing.
*
* Return true if all chunks meet the minimal RW mount requirements.
* Return false if any chunk doesn't meet the minimal RW mount requirements.
*/
bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
					struct btrfs_device *failing_dev)
{
	struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
	struct extent_map *em;
	u64 next_start = 0;
	bool ret = true;

	read_lock(&map_tree->map_tree.lock);
	em = lookup_extent_mapping(&map_tree->map_tree, 0, (u64)-1);
	read_unlock(&map_tree->map_tree.lock);
	/* No chunk at all? Return false anyway */
	if (!em) {
		ret = false;
		goto out;
	}
	while (em) {
		struct map_lookup *map;
		int missing = 0;
		int max_tolerated;
		int i;

		map = em->map_lookup;
		max_tolerated =
			btrfs_get_num_tolerated_disk_barrier_failures(
					map->type);
		/* Count stripes that cannot currently serve I/O. */
		for (i = 0; i < map->num_stripes; i++) {
			struct btrfs_device *dev = map->stripes[i].dev;

			if (!dev || !dev->bdev ||
			    test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
			    dev->last_flush_error)
				missing++;
			else if (failing_dev && failing_dev == dev)
				missing++;
		}
		if (missing > max_tolerated) {
			if (!failing_dev)
				btrfs_warn(fs_info,
	"chunk %llu missing %d devices, max tolerance is %d for writable mount",
				   em->start, missing, max_tolerated);
			free_extent_map(em);
			ret = false;
			goto out;
		}
		/* Drop our reference before looking up the next mapping. */
		next_start = extent_map_end(em);
		free_extent_map(em);

		read_lock(&map_tree->map_tree.lock);
		em = lookup_extent_mapping(&map_tree->map_tree, next_start,
					   (u64)(-1) - next_start);
		read_unlock(&map_tree->map_tree.lock);
	}
out:
	return ret;
}
/*
 * Load all DEV_ITEMs and CHUNK_ITEMs from the chunk tree during mount,
 * then cross-check the accumulated totals against the superblock.
 */
int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
{
	struct btrfs_root *root = fs_info->chunk_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;
	int slot;
	u64 total_dev = 0;	/* number of DEV_ITEMs seen in the tree */

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * uuid_mutex is needed only if we are mounting a sprout FS
	 * otherwise we don't need it.
	 */
	mutex_lock(&uuid_mutex);
	mutex_lock(&fs_info->chunk_mutex);

	/*
	 * Read all device items, and then all the chunk items. All
	 * device items are found before any chunk item (their object id
	 * is smaller than the lowest possible object id for a chunk
	 * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
	 */
	key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
	key.offset = 0;
	key.type = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto error;
	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			break;	/* past the last leaf: done */
		}
		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.type == BTRFS_DEV_ITEM_KEY) {
			struct btrfs_dev_item *dev_item;
			dev_item = btrfs_item_ptr(leaf, slot,
						  struct btrfs_dev_item);
			ret = read_one_dev(fs_info, leaf, dev_item);
			if (ret)
				goto error;
			total_dev++;
		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
			struct btrfs_chunk *chunk;
			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
			ret = read_one_chunk(fs_info, &found_key, leaf, chunk);
			if (ret)
				goto error;
		}
		path->slots[0]++;
	}

	/*
	 * After loading chunk tree, we've got all device information,
	 * do another round of validation checks.
	 */
	if (total_dev != fs_info->fs_devices->total_devices) {
		/*
		 * NOTE(review): the check compares against
		 * fs_devices->total_devices but the message prints
		 * btrfs_super_num_devices() - confirm the two always carry
		 * the same value, otherwise the log may not show the number
		 * that actually failed the check.
		 */
		btrfs_err(fs_info,
	   "super_num_devices %llu mismatch with num_devices %llu found here",
			  btrfs_super_num_devices(fs_info->super_copy),
			  total_dev);
		ret = -EINVAL;
		goto error;
	}
	if (btrfs_super_total_bytes(fs_info->super_copy) <
	    fs_info->fs_devices->total_rw_bytes) {
		btrfs_err(fs_info,
	"super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
			  btrfs_super_total_bytes(fs_info->super_copy),
			  fs_info->fs_devices->total_rw_bytes);
		ret = -EINVAL;
		goto error;
	}
	ret = 0;
error:
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&uuid_mutex);

	btrfs_free_path(path);
	return ret;
}
/*
 * Point every known device (including those on seed fs_devices) back at
 * this fs_info.  Each per-fs_devices list is walked under its own
 * device_list_mutex.
 */
void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *cur;
	struct btrfs_device *device;

	for (cur = fs_info->fs_devices; cur; cur = cur->seed) {
		mutex_lock(&cur->device_list_mutex);
		list_for_each_entry(device, &cur->devices, dev_list)
			device->fs_info = fs_info;
		mutex_unlock(&cur->device_list_mutex);
	}
}
/* Zero every error counter tracked for @dev. */
static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
{
	int idx = 0;

	while (idx < BTRFS_DEV_STAT_VALUES_MAX)
		btrfs_dev_stat_reset(dev, idx++);
}
/*
 * Load persisted per-device error statistics from the device tree into each
 * btrfs_device.  A device without a stats item simply starts from zeroed
 * counters; that is not treated as an error.
 */
int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
{
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct extent_buffer *eb;
	int slot;
	int ret = 0;
	struct btrfs_device *device;
	struct btrfs_path *path = NULL;
	int i;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		int item_size;
		struct btrfs_dev_stats_item *ptr;

		key.objectid = BTRFS_DEV_STATS_OBJECTID;
		key.type = BTRFS_PERSISTENT_ITEM_KEY;
		key.offset = device->devid;
		ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
		if (ret) {
			/* No stats item on disk: start with zeroed counters. */
			__btrfs_reset_dev_stats(device);
			device->dev_stats_valid = 1;
			btrfs_release_path(path);
			continue;
		}
		slot = path->slots[0];
		eb = path->nodes[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);
		item_size = btrfs_item_size_nr(eb, slot);

		ptr = btrfs_item_ptr(eb, slot,
				     struct btrfs_dev_stats_item);

		/*
		 * Older items may be shorter than the current counter set;
		 * any counter beyond the stored item size is reset to zero.
		 */
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (item_size >= (1 + i) * sizeof(__le64))
				btrfs_dev_stat_set(device, i,
					btrfs_dev_stats_value(eb, ptr, i));
			else
				btrfs_dev_stat_reset(device, i);
		}

		device->dev_stats_valid = 1;
		btrfs_dev_stat_print_on_load(device);
		btrfs_release_path(path);
	}
	mutex_unlock(&fs_devices->device_list_mutex);

out:
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
/*
 * Write the in-memory error counters of @device to its dev_stats item in
 * the device tree, creating (or deleting and re-creating a too-small) item
 * when necessary.
 */
static int update_dev_stat_item(struct btrfs_trans_handle *trans,
				struct btrfs_device *device)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_root *dev_root = fs_info->dev_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_dev_stats_item *ptr;
	int ret;
	int i;

	key.objectid = BTRFS_DEV_STATS_OBJECTID;
	key.type = BTRFS_PERSISTENT_ITEM_KEY;
	key.offset = device->devid;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
	if (ret < 0) {
		btrfs_warn_in_rcu(fs_info,
			"error %d while searching for dev_stats item for device %s",
			      ret, rcu_str_deref(device->name));
		goto out;
	}

	if (ret == 0 &&
	    btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
		/* need to delete old one and insert a new one */
		ret = btrfs_del_item(trans, dev_root, path);
		if (ret != 0) {
			btrfs_warn_in_rcu(fs_info,
				"delete too small dev_stats item for device %s failed %d",
				      rcu_str_deref(device->name), ret);
			goto out;
		}
		ret = 1;	/* fall through to the insert path below */
	}

	if (ret == 1) {
		/* need to insert a new item */
		btrfs_release_path(path);
		ret = btrfs_insert_empty_item(trans, dev_root, path,
					      &key, sizeof(*ptr));
		if (ret < 0) {
			btrfs_warn_in_rcu(fs_info,
				"insert dev_stats item for device %s failed %d",
				rcu_str_deref(device->name), ret);
			goto out;
		}
	}

	/* Copy every counter into the (possibly freshly created) item. */
	eb = path->nodes[0];
	ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		btrfs_set_dev_stats_value(eb, ptr, i,
					  btrfs_dev_stat_read(device, i));
	btrfs_mark_buffer_dirty(eb);

out:
	btrfs_free_path(path);
	return ret;
}
/*
* called from commit_transaction. Writes all changed device stats to disk.
*/
int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
			struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *device;
	int stats_cnt;
	int ret = 0;

	mutex_lock(&fs_devices->device_list_mutex);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		/* Skip devices without loaded stats or without new events. */
		stats_cnt = atomic_read(&device->dev_stats_ccnt);
		if (!device->dev_stats_valid || stats_cnt == 0)
			continue;

		/*
		 * There is a LOAD-LOAD control dependency between the value of
		 * dev_stats_ccnt and updating the on-disk values which requires
		 * reading the in-memory counters. Such control dependencies
		 * require explicit read memory barriers.
		 *
		 * This memory barriers pairs with smp_mb__before_atomic in
		 * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
		 * barrier implied by atomic_xchg in
		 * btrfs_dev_stats_read_and_reset
		 */
		smp_rmb();

		ret = update_dev_stat_item(trans, device);
		if (!ret)
			/* Only subtract what we saw; newer events stay pending. */
			atomic_sub(stats_cnt, &device->dev_stats_ccnt);
	}
	mutex_unlock(&fs_devices->device_list_mutex);
	return ret;
}
/* Record one more error of type @index on @dev and log the new totals. */
void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
{
	btrfs_dev_stat_inc(dev, index);
	btrfs_dev_stat_print_on_error(dev);
}
/* Dump all error counters for @dev via the rate-limited log helper. */
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
{
	/* Counters are meaningless until they were loaded from disk. */
	if (!dev->dev_stats_valid)
		return;
	btrfs_err_rl_in_rcu(dev->fs_info,
		"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
			   rcu_str_deref(dev->name),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
			   btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
{
int i;
for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
if (btrfs_dev_stat_read(dev, i) != 0)
break;
if (i == BTRFS_DEV_STAT_VALUES_MAX)
return; /* all values == 0, suppress message */
btrfs_info_in_rcu(dev->fs_info,
"bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
rcu_str_deref(dev->name),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
}
/*
 * ioctl backend: copy (and, when BTRFS_DEV_STATS_RESET is set, atomically
 * reset) the error counters of the device named by stats->devid into
 * @stats.  Returns 0 on success or -ENODEV when the device is unknown or
 * its stats were never loaded.
 */
int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
			struct btrfs_ioctl_get_dev_stats *stats)
{
	struct btrfs_device *dev;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	int i;

	mutex_lock(&fs_devices->device_list_mutex);
	dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
				true);
	mutex_unlock(&fs_devices->device_list_mutex);

	if (!dev) {
		btrfs_warn(fs_info, "get dev_stats failed, device not found");
		return -ENODEV;
	} else if (!dev->dev_stats_valid) {
		btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
		return -ENODEV;
	} else if (stats->flags & BTRFS_DEV_STATS_RESET) {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
			if (stats->nr_items > i)
				stats->values[i] =
					btrfs_dev_stat_read_and_reset(dev, i);
			else
				/* Caller asked for fewer items: still reset. */
				btrfs_dev_stat_reset(dev, i);
		}
	} else {
		for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
			if (stats->nr_items > i)
				stats->values[i] = btrfs_dev_stat_read(dev, i);
	}
	/* Report back how many values are actually meaningful. */
	if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
		stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
	return 0;
}
/*
 * Wipe the btrfs magic from all superblock copies on @bdev so the device
 * no longer scans as btrfs, then notify udev and touch the device node
 * timestamps so libblkid re-probes it.
 */
void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path)
{
	struct buffer_head *bh;
	struct btrfs_super_block *disk_super;
	int copy_num;

	if (!bdev)
		return;

	for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX;
		copy_num++) {

		/* Skip any superblock copy that cannot be read. */
		if (btrfs_read_dev_one_super(bdev, copy_num, &bh))
			continue;

		disk_super = (struct btrfs_super_block *)bh->b_data;

		memset(&disk_super->magic, 0, sizeof(disk_super->magic));
		set_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);
	}

	/* Notify udev that device has changed */
	btrfs_kobject_uevent(bdev, KOBJ_CHANGE);

	/* Update ctime/mtime for device path for libblkid */
	update_dev_time(device_path);
}
/*
* Update the size of all devices, which is used for writing out the
* super blocks.
*/
void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	struct btrfs_device *curr, *next;

	/* Fast path: nothing was resized since the last commit. */
	if (list_empty(&fs_devices->resized_devices))
		return;

	mutex_lock(&fs_devices->device_list_mutex);
	mutex_lock(&fs_info->chunk_mutex);
	list_for_each_entry_safe(curr, next, &fs_devices->resized_devices,
				 resized_list) {
		/* Commit the new size and drop the device from the list. */
		list_del_init(&curr->resized_list);
		curr->commit_total_bytes = curr->disk_total_bytes;
	}
	mutex_unlock(&fs_info->chunk_mutex);
	mutex_unlock(&fs_devices->device_list_mutex);
}
/* Must be invoked during the transaction commit */
void btrfs_update_commit_device_bytes_used(struct btrfs_transaction *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	int i;

	/* Fast path: no chunks were allocated in this transaction. */
	if (list_empty(&trans->pending_chunks))
		return;

	/* In order to kick the device replace finish process */
	mutex_lock(&fs_info->chunk_mutex);
	/* Commit bytes_used for every device touched by a pending chunk. */
	list_for_each_entry(em, &trans->pending_chunks, list) {
		map = em->map_lookup;

		for (i = 0; i < map->num_stripes; i++) {
			dev = map->stripes[i].dev;
			dev->commit_bytes_used = dev->bytes_used;
		}
	}
	mutex_unlock(&fs_info->chunk_mutex);
}
/* Point the whole fs_devices chain (main + seeds) at @fs_info. */
void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *cur;

	for (cur = fs_info->fs_devices; cur; cur = cur->seed)
		cur->fs_info = fs_info;
}
/* Clear the fs_info back-pointer on the whole fs_devices chain. */
void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *cur;

	for (cur = fs_info->fs_devices; cur; cur = cur->seed)
		cur->fs_info = NULL;
}
/*
* Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
*/
/*
 * Multiplicity factor for block group profiles that store two copies
 * (DUP, RAID1-like, RAID10); every other profile counts as one.
 */
int btrfs_bg_type_to_factor(u64 flags)
{
	const u64 two_copy_profiles = BTRFS_BLOCK_GROUP_DUP |
				      BTRFS_BLOCK_GROUP_RAID1 |
				      BTRFS_BLOCK_GROUP_RAID10;

	return (flags & two_copy_profiles) ? 2 : 1;
}
/*
 * Compute the per-device stripe length for a chunk of @chunk_len bytes:
 * RAID5/6 lose one/two parity stripes, all other profiles divide by the
 * number of copies.
 */
static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
{
	const u64 profile = type & BTRFS_BLOCK_GROUP_PROFILE_MASK;
	int index = btrfs_bg_flags_to_raid_index(type);
	int data_stripes;

	if (profile == BTRFS_BLOCK_GROUP_RAID5)
		data_stripes = num_stripes - 1;
	else if (profile == BTRFS_BLOCK_GROUP_RAID6)
		data_stripes = num_stripes - 2;
	else
		data_stripes = num_stripes / btrfs_raid_array[index].ncopies;

	return div_u64(chunk_len, data_stripes);
}
/*
 * Validate one dev extent against the chunk mapping it points at: a
 * matching chunk must exist, the extent length must equal the computed
 * stripe length, one of the chunk's stripes must reference this
 * (devid, physical) pair, and the extent must fit inside the device.
 *
 * Returns 0 if everything matches, -EUCLEAN on any inconsistency.
 */
static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
				 u64 chunk_offset, u64 devid,
				 u64 physical_offset, u64 physical_len)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
	struct extent_map *em;
	struct map_lookup *map;
	struct btrfs_device *dev;
	u64 stripe_len;
	bool found = false;
	int ret = 0;
	int i;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
	read_unlock(&em_tree->lock);

	if (!em) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
			  physical_offset, devid);
		ret = -EUCLEAN;
		goto out;
	}

	map = em->map_lookup;
	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
	if (physical_len != stripe_len) {
		btrfs_err(fs_info,
"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
			  physical_offset, devid, em->start, physical_len,
			  stripe_len);
		ret = -EUCLEAN;
		goto out;
	}

	/* Find the stripe that references this (devid, physical) pair. */
	for (i = 0; i < map->num_stripes; i++) {
		if (map->stripes[i].dev->devid == devid &&
		    map->stripes[i].physical == physical_offset) {
			found = true;
			if (map->verified_stripes >= map->num_stripes) {
				btrfs_err(fs_info,
				"too many dev extents for chunk %llu found",
					  em->start);
				ret = -EUCLEAN;
				goto out;
			}
			map->verified_stripes++;
			break;
		}
	}
	if (!found) {
		/*
		 * Deliberately no goto here: ret stays -EUCLEAN while the
		 * boundary checks below still run (they can only overwrite
		 * it with the same error code).
		 */
		btrfs_err(fs_info,
	"dev extent physical offset %llu devid %llu has no corresponding chunk",
			physical_offset, devid);
		ret = -EUCLEAN;
	}

	/* Make sure no dev extent is beyond device boundary */
	dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
	if (!dev) {
		btrfs_err(fs_info, "failed to find devid %llu", devid);
		ret = -EUCLEAN;
		goto out;
	}

	/* It's possible this device is a dummy for seed device */
	if (dev->disk_total_bytes == 0) {
		dev = btrfs_find_device(fs_info->fs_devices->seed, devid, NULL,
					NULL, false);
		if (!dev) {
			btrfs_err(fs_info, "failed to find seed devid %llu",
				  devid);
			ret = -EUCLEAN;
			goto out;
		}
	}

	if (physical_offset + physical_len > dev->disk_total_bytes) {
		btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
			  devid, physical_offset, physical_len,
			  dev->disk_total_bytes);
		ret = -EUCLEAN;
		goto out;
	}
out:
	free_extent_map(em);
	return ret;
}
/*
 * Ensure every chunk mapping saw exactly as many dev extents as it has
 * stripes (verified_stripes is incremented in verify_one_dev_extent()).
 * Returns 0 when all chunks are fully covered, -EUCLEAN otherwise.
 */
static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
{
	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
	struct extent_map *em;
	struct rb_node *node;
	int ret = 0;

	read_lock(&em_tree->lock);
	for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
		em = rb_entry(node, struct extent_map, rb_node);
		if (em->map_lookup->num_stripes !=
		    em->map_lookup->verified_stripes) {
			btrfs_err(fs_info,
			"chunk %llu has missing dev extent, have %d expect %d",
				  em->start, em->map_lookup->verified_stripes,
				  em->map_lookup->num_stripes);
			ret = -EUCLEAN;
			goto out;
		}
	}
out:
	read_unlock(&em_tree->lock);
	return ret;
}
/*
* Ensure that all dev extents are mapped to correct chunk, otherwise
* later chunk allocation/free would cause unexpected behavior.
*
* NOTE: This will iterate through the whole device tree, which should be of
* the same size level as the chunk tree. This slightly increases mount time.
*/
int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
{
	struct btrfs_path *path;
	struct btrfs_root *root = fs_info->dev_root;
	struct btrfs_key key;
	u64 prev_devid = 0;
	u64 prev_dev_ext_end = 0;	/* end of the previous extent on prev_devid */
	int ret = 0;

	/* Dev extents are keyed (devid, DEV_EXTENT, physical): seek to the start. */
	key.objectid = 1;
	key.type = BTRFS_DEV_EXTENT_KEY;
	key.offset = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		/* No dev extents at all? Not good */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	while (1) {
		struct extent_buffer *leaf = path->nodes[0];
		struct btrfs_dev_extent *dext;
		int slot = path->slots[0];
		u64 chunk_offset;
		u64 physical_offset;
		u64 physical_len;
		u64 devid;

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type != BTRFS_DEV_EXTENT_KEY)
			break;	/* walked past the dev extents */
		devid = key.objectid;
		physical_offset = key.offset;

		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
		physical_len = btrfs_dev_extent_length(leaf, dext);

		/* Check if this dev extent overlaps with the previous one */
		if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
			btrfs_err(fs_info,
"dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
				  devid, physical_offset, prev_dev_ext_end);
			ret = -EUCLEAN;
			goto out;
		}

		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
					    physical_offset, physical_len);
		if (ret < 0)
			goto out;
		prev_devid = devid;
		prev_dev_ext_end = physical_offset + physical_len;

		ret = btrfs_next_item(root, path);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;	/* end of the device tree */
		}
	}

	/* Ensure all chunks have corresponding dev extents */
	ret = verify_chunk_dev_extent_mapping(fs_info);
out:
	btrfs_free_path(path);
	return ret;
}
/*
* Check whether the given block group or device is pinned by any inode being
* used as a swapfile.
*/
bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
{
struct btrfs_swapfile_pin *sp;
struct rb_node *node;
spin_lock(&fs_info->swapfile_pins_lock);
node = fs_info->swapfile_pins.rb_node;
while (node) {
sp = rb_entry(node, struct btrfs_swapfile_pin, node);
if (ptr < sp->ptr)
node = node->rb_left;
else if (ptr > sp->ptr)
node = node->rb_right;
else
break;
}
spin_unlock(&fs_info->swapfile_pins_lock);
return node != NULL;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_1223_3 |
crossvul-cpp_data_good_1061_0 | /*
pcd.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
Under the terms of the GNU General Public License.
This is a high-level driver for parallel port ATAPI CD-ROM
drives based on chips supported by the paride module.
By default, the driver will autoprobe for a single parallel
port ATAPI CD-ROM drive, but if their individual parameters are
specified, the driver can handle up to 4 drives.
The behaviour of the pcd driver can be altered by setting
some parameters from the insmod command line. The following
parameters are adjustable:
drive0 These four arguments can be arrays of
drive1 1-6 integers as follows:
drive2
drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
Where,
<prt> is the base of the parallel port address for
the corresponding drive. (required)
<pro> is the protocol number for the adapter that
supports this drive. These numbers are
logged by 'paride' when the protocol modules
are initialised. (0 if not given)
<uni> for those adapters that support chained
devices, this is the unit selector for the
chain of devices on the given port. It should
be zero for devices that don't support chaining.
(0 if not given)
<mod> this can be -1 to choose the best mode, or one
of the mode numbers supported by the adapter.
(-1 if not given)
<slv> ATAPI CD-ROMs can be jumpered to master or slave.
Set this to 0 to choose the master drive, 1 to
choose the slave, -1 (the default) to choose the
first drive found.
<dly> some parallel ports require the driver to
go more slowly. -1 sets a default value that
should work with the chosen protocol. Otherwise,
set this to a small integer, the larger it is
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
major You may use this parameter to override the
default major number (46) that this driver
will use. Be sure to change the device
name as well.
name This parameter is a character string that
contains the name the kernel will use for this
device (in /proc output, for instance).
(default "pcd")
verbose This parameter controls the amount of logging
that the driver will do. Set it to 0 for
normal operation, 1 to see autoprobe progress
messages, or 2 to see additional debugging
output. (default 0)
nice This parameter controls the driver's use of
idle CPU time, at the expense of some speed.
If this driver is built into the kernel, you can use the
following kernel command line parameters, with the same values
as the corresponding module parameters listed above:
pcd.drive0
pcd.drive1
pcd.drive2
pcd.drive3
pcd.nice
In addition, you can use the parameter pcd.disable to disable
the driver entirely.
*/
/* Changes:
1.01 GRG 1998.01.24 Added test unit ready support
1.02 GRG 1998.05.06 Changes to pcd_completion, ready_wait,
and loosen interpretation of ATAPI
standard for clearing error status.
Use spinlocks. Eliminate sti().
1.03 GRG 1998.06.16 Eliminated an Ugh
1.04 GRG 1998.08.15 Added extra debugging, improvements to
pcd_completion, use HZ in loop timing
1.05 GRG 1998.08.16 Conformed to "Uniform CD-ROM" standard
1.06 GRG 1998.08.19 Added audio ioctl support
1.07 GRG 1998.09.24 Increased reset timeout, added jumbo support
*/
/* Driver identity and defaults; "major" and "name" below can override
   PCD_MAJOR/PCD_NAME at module load time. */
#define PCD_VERSION	"1.07"
#define PCD_MAJOR	46
#define PCD_NAME	"pcd"
#define PCD_UNITS	4

/* Here are things one can override from the insmod command.
   Most are autoprobed by paride unless set here.  Verbose is off
   by default.

*/

static int verbose = 0;
static int major = PCD_MAJOR;
static char *name = PCD_NAME;
static int nice = 0;
static int disable = 0;		/* non-zero disables the whole driver */

/* Per-drive parameter tuples; see the drive0..drive3 description above. */
static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
static int drive3[6] = { 0, 0, 0, -1, -1, -1 };

static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
static int pcd_drive_count;	/* how many drive<n> arrays were supplied */

/* Indices into a drive tuple: port, protocol, unit, mode, slave, delay. */
enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};

/* end of parameters */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/cdrom.h>
#include <linux/spinlock.h>
#include <linux/blk-mq.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
/* pcd_mutex serializes the open/ioctl paths; pcd_lock protects the
   single-request I/O engine state below. */
static DEFINE_MUTEX(pcd_mutex);
static DEFINE_SPINLOCK(pcd_lock);

module_param(verbose, int, 0644);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(nice, int, 0);
module_param_array(drive0, int, NULL, 0);
module_param_array(drive1, int, NULL, 0);
module_param_array(drive2, int, NULL, 0);
module_param_array(drive3, int, NULL, 0);

#include "paride.h"
#include "pseudo.h"

/* Command retry and timing tunables. */
#define PCD_RETRIES 5
#define PCD_TMO 800		/* timeout in jiffies */
#define PCD_DELAY 50		/* spin delay in uS */
#define PCD_READY_TMO 20	/* in seconds */
#define PCD_RESET_TMO 100	/* in tenths of a second */

#define PCD_SPIN (1000000*PCD_TMO)/(HZ*PCD_DELAY)

/* IDE status register bits. */
#define IDE_ERR 0x01
#define IDE_DRQ 0x08
#define IDE_READY 0x40
#define IDE_BUSY 0x80
/* Forward declarations: cdrom_device_ops callbacks and the request engine. */
static int pcd_open(struct cdrom_device_info *cdi, int purpose);
static void pcd_release(struct cdrom_device_info *cdi);
static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr);
static unsigned int pcd_check_events(struct cdrom_device_info *cdi,
				     unsigned int clearing, int slot_nr);
static int pcd_tray_move(struct cdrom_device_info *cdi, int position);
static int pcd_lock_door(struct cdrom_device_info *cdi, int lock);
static int pcd_drive_reset(struct cdrom_device_info *cdi);
static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn);
static int pcd_audio_ioctl(struct cdrom_device_info *cdi,
			   unsigned int cmd, void *arg);
static int pcd_packet(struct cdrom_device_info *cdi,
		      struct packet_command *cgc);

static int pcd_detect(void);
static void pcd_probe_capabilities(void);
static void do_pcd_read_drq(void);
static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd);
static void do_pcd_read(void);
/* Per-drive state; one entry per possible unit, ->present marks the
   drives actually detected at load time. */
struct pcd_unit {
	struct pi_adapter pia;	/* interface to paride layer */
	struct pi_adapter *pi;
	int drive;		/* master/slave */
	int last_sense;		/* result of last request sense */
	int changed;		/* media change seen */
	int present;		/* does this unit exist ? */
	char *name;		/* pcd0, pcd1, etc */
	struct cdrom_device_info info;	/* uniform cdrom interface */
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct list_head rq_list;	/* pending requests for this unit */
};

static struct pcd_unit pcd[PCD_UNITS];

static char pcd_scratch[64];	/* scratch buffer for small ATAPI replies */
static char pcd_buffer[2048];	/* raw block buffer */
static int pcd_bufblk = -1;	/* block in buffer, in CD units,
				   -1 for nothing there. See also
				   pd_unit.
				 */

/* the variables below are used mainly in the I/O request engine, which
   processes only one request at a time.
*/
static struct pcd_unit *pcd_current; /* current request's drive */
static struct request *pcd_req;
static int pcd_retries;		/* retries on current request */
static int pcd_busy;		/* request being processed ? */
static int pcd_sector;		/* address of next requested sector */
static int pcd_count;		/* number of blocks still to do */
static char *pcd_buf;		/* buffer for request in progress */
static void *par_drv;		/* reference of parport driver */
/* kernel glue structures */

/* Block-device open: validate media, then forward to the uniform cdrom
   layer under pcd_mutex. */
static int pcd_block_open(struct block_device *bdev, fmode_t mode)
{
	struct pcd_unit *cd = bdev->bd_disk->private_data;
	int ret;

	check_disk_change(bdev);

	mutex_lock(&pcd_mutex);
	ret = cdrom_open(&cd->info, bdev, mode);
	mutex_unlock(&pcd_mutex);
	return ret;
}

/* Block-device release: forward to cdrom_release() under pcd_mutex. */
static void pcd_block_release(struct gendisk *disk, fmode_t mode)
{
	struct pcd_unit *cd = disk->private_data;
	mutex_lock(&pcd_mutex);
	cdrom_release(&cd->info, mode);
	mutex_unlock(&pcd_mutex);
}

/* Block-device ioctl: all ioctls are handled by the uniform cdrom layer. */
static int pcd_block_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned cmd, unsigned long arg)
{
	struct pcd_unit *cd = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&pcd_mutex);
	ret = cdrom_ioctl(&cd->info, bdev, mode, cmd, arg);
	mutex_unlock(&pcd_mutex);

	return ret;
}

/* Media-change polling hook for the block layer. */
static unsigned int pcd_block_check_events(struct gendisk *disk,
					   unsigned int clearing)
{
	struct pcd_unit *cd = disk->private_data;
	return cdrom_check_events(&cd->info, clearing);
}

static const struct block_device_operations pcd_bdops = {
	.owner		= THIS_MODULE,
	.open		= pcd_block_open,
	.release	= pcd_block_release,
	.ioctl		= pcd_block_ioctl,
	.check_events	= pcd_block_check_events,
};
/* Uniform cdrom layer operations; the capability mask is trimmed per
   drive in pcd_probe_capabilities(). */
static const struct cdrom_device_ops pcd_dops = {
	.open		= pcd_open,
	.release	= pcd_release,
	.drive_status	= pcd_drive_status,
	.check_events	= pcd_check_events,
	.tray_move	= pcd_tray_move,
	.lock_door	= pcd_lock_door,
	.get_mcn	= pcd_get_mcn,
	.reset		= pcd_drive_reset,
	.audio_ioctl	= pcd_audio_ioctl,
	.generic_packet	= pcd_packet,
	.capability	= CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
			  CDC_MCN | CDC_MEDIA_CHANGED | CDC_RESET |
			  CDC_PLAY_AUDIO | CDC_GENERIC_PACKET | CDC_CD_R |
			  CDC_CD_RW,
};

/* blk-mq entry point; requests are queued onto the per-unit rq_list. */
static const struct blk_mq_ops pcd_mq_ops = {
	.queue_rq	= pcd_queue_rq,
};
/*
 * Allocate a gendisk and single-queue blk-mq request queue for every
 * possible unit and initialise per-unit bookkeeping from the module
 * parameters.  Units whose disk or queue cannot be allocated are
 * skipped (cd->disk stays NULL; later loops test for that).
 */
static void pcd_init_units(void)
{
	struct pcd_unit *cd;
	int unit;

	pcd_drive_count = 0;
	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
		struct gendisk *disk = alloc_disk(1);

		if (!disk)
			continue;

		disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
						   1, BLK_MQ_F_SHOULD_MERGE);
		if (IS_ERR(disk->queue)) {
			/*
			 * Clear the ERR_PTR *before* dropping the disk
			 * reference: put_disk() may free the gendisk, so
			 * assigning disk->queue afterwards (as the old code
			 * did) is a use-after-free, and would also leave an
			 * ERR_PTR for the teardown paths to trip over.
			 */
			disk->queue = NULL;
			put_disk(disk);
			continue;
		}

		INIT_LIST_HEAD(&cd->rq_list);
		disk->queue->queuedata = cd;
		blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
		cd->disk = disk;
		cd->pi = &cd->pia;
		cd->present = 0;
		cd->last_sense = 0;
		cd->changed = 1;
		cd->drive = (*drives[unit])[D_SLV];
		if ((*drives[unit])[D_PRT])
			pcd_drive_count++;

		cd->name = &cd->info.name[0];
		snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit);
		cd->info.ops = &pcd_dops;
		cd->info.handle = cd;
		cd->info.speed = 0;
		cd->info.capacity = 1;
		cd->info.mask = 0;
		disk->major = major;
		disk->first_minor = unit;
		strcpy(disk->disk_name, cd->name);	/* umm... */
		disk->fops = &pcd_bdops;
		disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
	}
}
/* cdrom layer open hook: only detected drives may be opened. */
static int pcd_open(struct cdrom_device_info *cdi, int purpose)
{
	struct pcd_unit *cd = cdi->handle;
	if (!cd->present)
		return -ENODEV;
	return 0;
}

/* cdrom layer release hook: nothing to do for this driver. */
static void pcd_release(struct cdrom_device_info *cdi)
{
}

/* Read the alternate (non-clearing) IDE status register. */
static inline int status_reg(struct pcd_unit *cd)
{
	return pi_read_regr(cd->pi, 1, 6);
}

/* Read an IDE task-file register through the paride adapter. */
static inline int read_reg(struct pcd_unit *cd, int reg)
{
	return pi_read_regr(cd->pi, 0, reg);
}

/* Write an IDE task-file register through the paride adapter. */
static inline void write_reg(struct pcd_unit *cd, int reg, int val)
{
	pi_write_regr(cd->pi, 0, reg, val);
}
/* Busy-wait until all bits in 'go' are clear and (if 'stop' is non-zero)
   some bit in 'stop' is set, or until PCD_SPIN iterations elapse.
   Returns 0 on success, otherwise (status << 8) | altstatus; logs a
   diagnostic when 'fun' is non-NULL.  0x100 is folded into the error
   byte on timeout. */
static int pcd_wait(struct pcd_unit *cd, int go, int stop, char *fun, char *msg)
{
	int j, r, e, s, p;

	j = 0;
	while ((((r = status_reg(cd)) & go) || (stop && (!(r & stop))))
	       && (j++ < PCD_SPIN))
		udelay(PCD_DELAY);

	if ((r & (IDE_ERR & stop)) || (j > PCD_SPIN)) {
		s = read_reg(cd, 7);
		e = read_reg(cd, 1);
		p = read_reg(cd, 2);
		if (j > PCD_SPIN)
			e |= 0x100;
		if (fun)
			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
			       " loop=%d phase=%d\n",
			       cd->name, fun, msg, r, s, e, j, p);
		return (s << 8) + r;
	}
	return 0;
}
/* Issue a 12-byte ATAPI packet command.  Selects the drive, programs the
   expected transfer length, sends the 0xa0 packet opcode and writes the
   command block.  Leaves the adapter connected on success (completed by
   pcd_completion()); disconnects and returns -1 on any failure. */
static int pcd_command(struct pcd_unit *cd, char *cmd, int dlen, char *fun)
{
	pi_connect(cd->pi);

	write_reg(cd, 6, 0xa0 + 0x10 * cd->drive);

	if (pcd_wait(cd, IDE_BUSY | IDE_DRQ, 0, fun, "before command")) {
		pi_disconnect(cd->pi);
		return -1;
	}

	write_reg(cd, 4, dlen % 256);
	write_reg(cd, 5, dlen / 256);
	write_reg(cd, 7, 0xa0);	/* ATAPI packet command */

	if (pcd_wait(cd, IDE_BUSY, IDE_DRQ, fun, "command DRQ")) {
		pi_disconnect(cd->pi);
		return -1;
	}

	if (read_reg(cd, 2) != 1) {
		printk("%s: %s: command phase error\n", cd->name, fun);
		pi_disconnect(cd->pi);
		return -1;
	}

	pi_write_block(cd->pi, cmd, 12);

	return 0;
}
/* Run the data/status phases of a command started by pcd_command().
   Reads at most one data-in burst into 'buf' (j guards against reading
   twice), tolerates unexpected phases for up to PCD_TMO iterations, and
   always disconnects the adapter.  Returns 0 on success, -1 on error. */
static int pcd_completion(struct pcd_unit *cd, char *buf, char *fun)
{
	int r, d, p, n, k, j;

	r = -1;
	k = 0;
	j = 0;

	if (!pcd_wait(cd, IDE_BUSY, IDE_DRQ | IDE_READY | IDE_ERR,
		      fun, "completion")) {
		r = 0;
		while (read_reg(cd, 7) & IDE_DRQ) {
			d = read_reg(cd, 4) + 256 * read_reg(cd, 5);
			n = (d + 3) & 0xfffc;	/* round burst up to a dword */
			p = read_reg(cd, 2) & 3;
			if ((p == 2) && (n > 0) && (j == 0)) {
				pi_read_block(cd->pi, buf, n);
				if (verbose > 1)
					printk("%s: %s: Read %d bytes\n",
					       cd->name, fun, n);
				r = 0;
				j++;
			} else {
				if (verbose > 1)
					printk
					    ("%s: %s: Unexpected phase %d, d=%d, k=%d\n",
					     cd->name, fun, p, d, k);
				if (verbose < 2)
					printk_once(
						"%s: WARNING: ATAPI phase errors\n",
						cd->name);
				mdelay(1);
			}
			if (k++ > PCD_TMO) {
				printk("%s: Stuck DRQ\n", cd->name);
				break;
			}
			if (pcd_wait
			    (cd, IDE_BUSY, IDE_DRQ | IDE_READY | IDE_ERR, fun,
			     "completion")) {
				r = -1;
				break;
			}
		}
	}

	pi_disconnect(cd->pi);

	return r;
}
/* Issue REQUEST SENSE after a failed command, cache the packed
   key/ASC/ASCQ in cd->last_sense (-1 if the sense itself failed), and
   flag a media change for sense keys 2 (not ready) and 6 (unit attn). */
static void pcd_req_sense(struct pcd_unit *cd, char *fun)
{
	char rs_cmd[12] = { 0x03, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
	char buf[16];
	int r, c;

	r = pcd_command(cd, rs_cmd, 16, "Request sense");
	mdelay(1);
	if (!r)
		pcd_completion(cd, buf, "Request sense");

	cd->last_sense = -1;
	c = 2;
	if (!r) {
		if (fun)
			printk("%s: %s: Sense key: %x, ASC: %x, ASQ: %x\n",
			       cd->name, fun, buf[2] & 0xf, buf[12], buf[13]);
		c = buf[2] & 0xf;
		cd->last_sense =
		    c | ((buf[12] & 0xff) << 8) | ((buf[13] & 0xff) << 16);
	}
	if ((c == 2) || (c == 6))
		cd->changed = 1;
}
/* Execute a complete ATAPI command (command + completion phases),
   requesting sense data on failure.  Returns 0 on success. */
static int pcd_atapi(struct pcd_unit *cd, char *cmd, int dlen, char *buf, char *fun)
{
	int r;

	r = pcd_command(cd, cmd, dlen, fun);
	mdelay(1);
	if (!r)
		r = pcd_completion(cd, buf, fun);
	if (r)
		pcd_req_sense(cd, fun);

	return r;
}

/* Generic packet pass-through for the uniform cdrom layer. */
static int pcd_packet(struct cdrom_device_info *cdi, struct packet_command *cgc)
{
	return pcd_atapi(cdi->handle, cgc->cmd, cgc->buflen, cgc->buffer,
			 "generic packet");
}
/* Only label diagnostics when verbose debugging is enabled. */
#define DBMSG(msg)	((verbose>1)?(msg):NULL)

/* Report (and clear) the cached media-change flag to the cdrom layer. */
static unsigned int pcd_check_events(struct cdrom_device_info *cdi,
				     unsigned int clearing, int slot_nr)
{
	struct pcd_unit *cd = cdi->handle;
	int res = cd->changed;
	if (res)
		cd->changed = 0;
	return res ? DISK_EVENT_MEDIA_CHANGE : 0;
}

/* PREVENT/ALLOW MEDIUM REMOVAL (0x1e); 'lock' selects prevent. */
static int pcd_lock_door(struct cdrom_device_info *cdi, int lock)
{
	char un_cmd[12] = { 0x1e, 0, 0, 0, lock, 0, 0, 0, 0, 0, 0, 0 };

	return pcd_atapi(cdi->handle, un_cmd, 0, pcd_scratch,
			 lock ? "lock door" : "unlock door");
}

/* START/STOP UNIT (0x1b): position 1 = eject (LoEj=2), 0 = close (3). */
static int pcd_tray_move(struct cdrom_device_info *cdi, int position)
{
	char ej_cmd[12] = { 0x1b, 0, 0, 0, 3 - position, 0, 0, 0, 0, 0, 0, 0 };

	return pcd_atapi(cdi->handle, ej_cmd, 0, pcd_scratch,
			 position ? "eject" : "close tray");
}

/* Interruptible sleep for 'cs' jiffies. */
static void pcd_sleep(int cs)
{
	schedule_timeout_interruptible(cs);
}
/* Soft-reset the drive and verify the ATAPI signature registers.
   Returns 0 when the expected signature (..., 0x14, 0xeb) is seen,
   -1 otherwise. */
static int pcd_reset(struct pcd_unit *cd)
{
	int i, k, flg;
	int expect[5] = { 1, 1, 1, 0x14, 0xeb };

	pi_connect(cd->pi);
	write_reg(cd, 6, 0xa0 + 0x10 * cd->drive);
	write_reg(cd, 7, 8);	/* ATAPI DEVICE RESET */

	pcd_sleep(20 * HZ / 1000);	/* delay a bit */

	k = 0;
	while ((k++ < PCD_RESET_TMO) && (status_reg(cd) & IDE_BUSY))
		pcd_sleep(HZ / 10);

	flg = 1;
	for (i = 0; i < 5; i++)
		flg &= (read_reg(cd, i + 1) == expect[i]);

	if (verbose) {
		printk("%s: Reset (%d) signature = ", cd->name, k);
		for (i = 0; i < 5; i++)
			printk("%3x", read_reg(cd, i + 1));
		if (!flg)
			printk(" (incorrect)");
		printk("\n");
	}

	pi_disconnect(cd->pi);
	return flg - 1;
}

/* cdrom layer reset hook. */
static int pcd_drive_reset(struct cdrom_device_info *cdi)
{
	return pcd_reset(cdi->handle);
}
/* Poll TEST UNIT READY once per second for up to 'tmo' seconds.
   Returns 0 when ready, the packed sense value for hard errors, and
   0x000020 on timeout.  Sense 0x0402 ("becoming ready") and key 6
   (unit attention) are treated as transient and retried. */
static int pcd_ready_wait(struct pcd_unit *cd, int tmo)
{
	char tr_cmd[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	int k, p;

	k = 0;
	while (k < tmo) {
		cd->last_sense = 0;
		pcd_atapi(cd, tr_cmd, 0, NULL, DBMSG("test unit ready"));
		p = cd->last_sense;
		if (!p)
			return 0;
		if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6)))
			return p;
		k++;
		pcd_sleep(HZ);
	}
	return 0x000020;	/* timeout */
}

/* Classify drive state for the uniform cdrom layer via READ CAPACITY. */
static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr)
{
	char rc_cmd[12] = { 0x25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	struct pcd_unit *cd = cdi->handle;

	if (pcd_ready_wait(cd, PCD_READY_TMO))
		return CDS_DRIVE_NOT_READY;
	if (pcd_atapi(cd, rc_cmd, 8, pcd_scratch, DBMSG("check media")))
		return CDS_NO_DISC;
	return CDS_DISC_OK;
}
/* INQUIRY (0x12) the drive; verify peripheral device type 5 (CD-ROM)
   and copy the trimmed product-id string into 'id'.  Returns 0 on
   success, -1 if no CD-ROM responds. */
static int pcd_identify(struct pcd_unit *cd, char *id)
{
	int k, s;
	char id_cmd[12] = { 0x12, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };

	pcd_bufblk = -1;	/* invalidate the shared block cache */

	s = pcd_atapi(cd, id_cmd, 36, pcd_buffer, "identify");

	if (s)
		return -1;
	if ((pcd_buffer[0] & 0x1f) != 5) {
		if (verbose)
			printk("%s: %s is not a CD-ROM\n",
			       cd->name, cd->drive ? "Slave" : "Master");
		return -1;
	}
	memcpy(id, pcd_buffer + 16, 16);
	id[16] = 0;
	k = 16;
	while ((k >= 0) && (id[k] <= 0x20)) {	/* strip trailing blanks */
		id[k] = 0;
		k--;
	}

	printk("%s: %s: %s\n", cd->name, cd->drive ? "Slave" : "Master", id);

	return 0;
}

/*
 * returns  0, with id set if drive is detected
 *	   -1, if drive detection failed
 */
static int pcd_probe(struct pcd_unit *cd, int ms, char *id)
{
	if (ms == -1) {
		/* autoprobe: try master then slave */
		for (cd->drive = 0; cd->drive <= 1; cd->drive++)
			if (!pcd_reset(cd) && !pcd_identify(cd, id))
				return 0;
	} else {
		cd->drive = ms;
		if (!pcd_reset(cd) && !pcd_identify(cd, id))
			return 0;
	}
	return -1;
}
/* MODE SENSE the capabilities page (0x2a) of each present drive and
   mask off cdrom-layer capabilities the hardware lacks. */
static void pcd_probe_capabilities(void)
{
	int unit, r;
	char buffer[32];
	char cmd[12] = { 0x5a, 1 << 3, 0x2a, 0, 0, 0, 0, 18, 0, 0, 0, 0 };
	struct pcd_unit *cd;

	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
		if (!cd->present)
			continue;
		r = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
		if (r)
			continue;
		/* we should now have the cap page */
		if ((buffer[11] & 1) == 0)
			cd->info.mask |= CDC_CD_R;
		if ((buffer[11] & 2) == 0)
			cd->info.mask |= CDC_CD_RW;
		if ((buffer[12] & 1) == 0)
			cd->info.mask |= CDC_PLAY_AUDIO;
		if ((buffer[14] & 1) == 0)
			cd->info.mask |= CDC_LOCK;
		if ((buffer[14] & 8) == 0)
			cd->info.mask |= CDC_OPEN_TRAY;
		if ((buffer[14] >> 6) == 0)
			cd->info.mask |= CDC_CLOSE_TRAY;
	}
}
/* Probe for drives: autoprobe one drive when no ports were specified,
   otherwise probe each configured port.  On total failure, tear down
   all queues/disks allocated by pcd_init_units() and unregister the
   parport driver.  Returns 0 if at least one drive was found. */
static int pcd_detect(void)
{
	char id[18];
	int k, unit;
	struct pcd_unit *cd;

	printk("%s: %s version %s, major %d, nice %d\n",
	       name, name, PCD_VERSION, major, nice);

	par_drv = pi_register_driver(name);
	if (!par_drv) {
		pr_err("failed to register %s driver\n", name);
		return -1;
	}

	k = 0;
	if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
		cd = pcd;
		if (pi_init(cd->pi, 1, -1, -1, -1, -1, -1, pcd_buffer,
			    PI_PCD, verbose, cd->name)) {
			if (!pcd_probe(cd, -1, id) && cd->disk) {
				cd->present = 1;
				k++;
			} else
				pi_release(cd->pi);
		}
	} else {
		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
			int *conf = *drives[unit];
			if (!conf[D_PRT])
				continue;
			if (!pi_init(cd->pi, 0, conf[D_PRT], conf[D_MOD],
				     conf[D_UNI], conf[D_PRO], conf[D_DLY],
				     pcd_buffer, PI_PCD, verbose, cd->name))
				continue;
			if (!pcd_probe(cd, conf[D_SLV], id) && cd->disk) {
				cd->present = 1;
				k++;
			} else
				pi_release(cd->pi);
		}
	}
	if (k)
		return 0;

	printk("%s: No CD-ROM drive found\n", name);
	/* Failure: free everything pcd_init_units() allocated.  The queue
	   pointer is cleared before put_disk() so a freed queue is never
	   reachable through the gendisk. */
	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
		if (!cd->disk)
			continue;
		blk_cleanup_queue(cd->disk->queue);
		cd->disk->queue = NULL;
		blk_mq_free_tag_set(&cd->tag_set);
		put_disk(cd->disk);
	}
	pi_unregister_driver(par_drv);
	return -1;
}
/* I/O request processing */
static int pcd_queue;	/* round-robin cursor over units */

/* Pick the next pending request, scanning units round-robin starting
   after the last-served one.  Sets pcd_req and starts the request.
   Returns non-zero when a request was found.  Caller holds pcd_lock. */
static int set_next_request(void)
{
	struct pcd_unit *cd;
	int old_pos = pcd_queue;

	do {
		cd = &pcd[pcd_queue];
		if (++pcd_queue == PCD_UNITS)
			pcd_queue = 0;
		if (cd->present && !list_empty(&cd->rq_list)) {
			pcd_req = list_first_entry(&cd->rq_list, struct request,
						   queuelist);
			list_del_init(&pcd_req->queuelist);
			blk_mq_start_request(pcd_req);
			break;
		}
	} while (pcd_queue != old_pos);

	return pcd_req != NULL;
}
/* Start processing the current (or next) request if the engine is
   idle.  Invalidates the block cache when the target drive changed.
   Caller holds pcd_lock. */
static void pcd_request(void)
{
	struct pcd_unit *cd;

	if (pcd_busy)
		return;

	if (!pcd_req && !set_next_request())
		return;

	cd = pcd_req->rq_disk->private_data;
	if (cd != pcd_current)
		pcd_bufblk = -1;
	pcd_current = cd;
	pcd_sector = blk_rq_pos(pcd_req);
	pcd_count = blk_rq_cur_sectors(pcd_req);
	pcd_buf = bio_data(pcd_req->bio);
	pcd_busy = 1;
	ps_set_intr(do_pcd_read, NULL, 0, nice);
}

/* blk-mq queue_rq: reject writes, otherwise append to the unit's list
   and kick the request engine under pcd_lock. */
static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct pcd_unit *cd = hctx->queue->queuedata;

	if (rq_data_dir(bd->rq) != READ) {
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	spin_lock_irq(&pcd_lock);
	list_add_tail(&bd->rq->queuelist, &cd->rq_list);
	pcd_request();
	spin_unlock_irq(&pcd_lock);

	return BLK_STS_OK;
}
/* Complete the current chunk of pcd_req with status 'err', then try to
   start the next request.  Takes pcd_lock itself. */
static inline void next_request(blk_status_t err)
{
	unsigned long saved_flags;

	spin_lock_irqsave(&pcd_lock, saved_flags);
	if (!blk_update_request(pcd_req, err, blk_rq_cur_bytes(pcd_req))) {
		__blk_mq_end_request(pcd_req, err);
		pcd_req = NULL;
	}
	pcd_busy = 0;
	pcd_request();
	spin_unlock_irqrestore(&pcd_lock, saved_flags);
}

/* Non-blocking readiness test used as the poll callback for ps_set_intr:
   true when DRQ is set and the drive is not busy. */
static int pcd_ready(void)
{
	return (((status_reg(pcd_current) & (IDE_BUSY | IDE_DRQ)) == IDE_DRQ));
}

/* Copy as many 512-byte sectors as possible from the cached 2048-byte
   CD frame (pcd_bufblk) into the request buffer. */
static void pcd_transfer(void)
{

	while (pcd_count && (pcd_sector / 4 == pcd_bufblk)) {
		int o = (pcd_sector % 4) * 512;
		memcpy(pcd_buf, pcd_buffer + o, 512);
		pcd_count--;
		pcd_buf += 512;
		pcd_sector++;
	}
}
/* Issue READ(12) (0xa8) for one 2048-byte CD frame containing
   pcd_sector, then arm do_pcd_read_drq() to collect the data. */
static void pcd_start(void)
{
	int b, i;
	char rd_cmd[12] = { 0xa8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0 };

	pcd_bufblk = pcd_sector / 4;
	b = pcd_bufblk;
	for (i = 0; i < 4; i++) {	/* big-endian LBA into bytes 2..5 */
		rd_cmd[5 - i] = b & 0xff;
		b = b >> 8;
	}

	if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
		pcd_bufblk = -1;
		next_request(BLK_STS_IOERR);
		return;
	}

	mdelay(1);

	ps_set_intr(do_pcd_read_drq, pcd_ready, PCD_TMO, nice);
}

/* Request engine step: satisfy the request from the cache; when more
   data is needed, claim the port and start a frame read. */
static void do_pcd_read(void)
{
	pcd_busy = 1;
	pcd_retries = 0;
	pcd_transfer();
	if (!pcd_count) {
		next_request(0);
		return;
	}

	pi_do_claimed(pcd_current->pi, pcd_start);
}

/* DRQ callback: harvest the frame just read (retrying up to
   PCD_RETRIES), then continue the request and re-kick the engine. */
static void do_pcd_read_drq(void)
{
	unsigned long saved_flags;

	if (pcd_completion(pcd_current, pcd_buffer, "read block")) {
		if (pcd_retries < PCD_RETRIES) {
			mdelay(1);
			pcd_retries++;
			pi_do_claimed(pcd_current->pi, pcd_start);
			return;
		}
		pcd_bufblk = -1;
		next_request(BLK_STS_IOERR);
		return;
	}

	do_pcd_read();
	spin_lock_irqsave(&pcd_lock, saved_flags);
	pcd_request();
	spin_unlock_irqrestore(&pcd_lock, saved_flags);
}
/* the audio_ioctl stuff is adapted from sr_ioctl.c */

/* Handle the audio ioctls the uniform cdrom layer delegates to the
   driver: TOC header and TOC entry reads via READ TOC/PMA/ATIP. */
static int pcd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg)
{
	struct pcd_unit *cd = cdi->handle;

	switch (cmd) {

	case CDROMREADTOCHDR:

		{
			char cmd[12] =
			    { GPCMD_READ_TOC_PMA_ATIP, 0, 0, 0, 0, 0, 0, 0, 12,
			 0, 0, 0 };
			struct cdrom_tochdr *tochdr =
			    (struct cdrom_tochdr *) arg;
			char buffer[32];
			int r;

			r = pcd_atapi(cd, cmd, 12, buffer, "read toc header");

			tochdr->cdth_trk0 = buffer[2];
			tochdr->cdth_trk1 = buffer[3];

			return r ? -EIO : 0;
		}

	case CDROMREADTOCENTRY:

		{
			char cmd[12] =
			    { GPCMD_READ_TOC_PMA_ATIP, 0, 0, 0, 0, 0, 0, 0, 12,
			 0, 0, 0 };

			struct cdrom_tocentry *tocentry =
			    (struct cdrom_tocentry *) arg;
			unsigned char buffer[32];
			int r;

			cmd[1] =
			    (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0);
			cmd[6] = tocentry->cdte_track;

			r = pcd_atapi(cd, cmd, 12, buffer, "read toc entry");

			tocentry->cdte_ctrl = buffer[5] & 0xf;
			tocentry->cdte_adr = buffer[5] >> 4;
			tocentry->cdte_datamode =
			    (tocentry->cdte_ctrl & 0x04) ? 1 : 0;
			if (tocentry->cdte_format == CDROM_MSF) {
				tocentry->cdte_addr.msf.minute = buffer[9];
				tocentry->cdte_addr.msf.second = buffer[10];
				tocentry->cdte_addr.msf.frame = buffer[11];
			} else
				tocentry->cdte_addr.lba =
				    (((((buffer[8] << 8) + buffer[9]) << 8)
				      + buffer[10]) << 8) + buffer[11];

			return r ? -EIO : 0;
		}

	default:

		return -ENOSYS;
	}
}

/* READ SUB-CHANNEL (0x42) for the Media Catalog Number (UPC). */
static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
{
	char cmd[12] =
	    { GPCMD_READ_SUBCHANNEL, 0, 0x40, 2, 0, 0, 0, 0, 24, 0, 0, 0 };
	char buffer[32];

	if (pcd_atapi(cdi->handle, cmd, 24, buffer, "get mcn"))
		return -EIO;

	memcpy(mcn->medium_catalog_number, buffer + 9, 13);
	mcn->medium_catalog_number[13] = 0;

	return 0;
}
/*
 * Module init: allocate units, probe for drives, then register the
 * block device and each present drive with the uniform cdrom layer.
 * Returns 0 on success; on failure everything set up so far is undone.
 */
static int __init pcd_init(void)
{
	struct pcd_unit *cd;
	int unit;

	if (disable)
		return -EINVAL;

	pcd_init_units();

	if (pcd_detect())
		return -ENODEV;

	/* get the atapi capabilities page */
	pcd_probe_capabilities();

	if (register_blkdev(major, name)) {
		/*
		 * Undo everything pcd_init_units()/pcd_detect() set up.
		 * The old code freed the queues and disks but leaked the
		 * parport interfaces of detected units and the parport
		 * driver registration on this path.
		 */
		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
			if (!cd->disk)
				continue;
			if (cd->present)
				pi_release(cd->pi);
			blk_cleanup_queue(cd->disk->queue);
			blk_mq_free_tag_set(&cd->tag_set);
			put_disk(cd->disk);
		}
		pi_unregister_driver(par_drv);
		return -EBUSY;
	}

	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
		if (cd->present) {
			register_cdrom(&cd->info);
			cd->disk->private_data = cd;
			add_disk(cd->disk);
		}
	}

	return 0;
}
/* Module exit: unregister present drives, free queues/tag sets/disks
   for every allocated unit, then drop the blkdev and parport driver
   registrations. */
static void __exit pcd_exit(void)
{
	struct pcd_unit *cd;
	int unit;

	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
		if (!cd->disk)
			continue;

		if (cd->present) {
			del_gendisk(cd->disk);
			pi_release(cd->pi);
			unregister_cdrom(&cd->info);
		}
		blk_cleanup_queue(cd->disk->queue);
		blk_mq_free_tag_set(&cd->tag_set);
		put_disk(cd->disk);
	}
	unregister_blkdev(major, name);
	pi_unregister_driver(par_drv);
}

MODULE_LICENSE("GPL");
module_init(pcd_init)
module_exit(pcd_exit)
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_1061_0 |
crossvul-cpp_data_bad_3223_1 | /*
* Copyright (c) 1999-2000 Image Power, Inc. and the University of
* British Columbia.
* Copyright (c) 2001-2002 Michael David Adams.
* All rights reserved.
*/
/* __START_OF_JASPER_LICENSE__
*
* JasPer License Version 2.0
*
* Copyright (c) 2001-2006 Michael David Adams
* Copyright (c) 1999-2000 Image Power, Inc.
* Copyright (c) 1999-2000 The University of British Columbia
*
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person (the
* "User") obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the
* following conditions:
*
* 1. The above copyright notices and this permission notice (which
* includes the disclaimer below) shall be included in all copies or
* substantial portions of the Software.
*
* 2. The name of a copyright holder shall not be used to endorse or
* promote products derived from the Software without specific prior
* written permission.
*
* THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS
* LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER
* THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
* "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
* INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
* FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
* WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE
* PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE
* THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY.
* EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS
* BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL
* PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS
* GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE
* ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE
* IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL
* SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES,
* AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL
* SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH
* THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH,
* PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH
* RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY
* EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES.
*
* __END_OF_JASPER_LICENSE__
*/
/*
* JP2 Library
*
* $Id$
*/
/******************************************************************************\
* Includes.
\******************************************************************************/
#include <assert.h>
#include <stdlib.h>
#include <inttypes.h>
#include "jasper/jas_stream.h"
#include "jasper/jas_malloc.h"
#include "jasper/jas_debug.h"
#include "jp2_cod.h"
/******************************************************************************\
* Function prototypes.
\******************************************************************************/
/* Bit mask with the low n bits set (n < word size). */
#define	ONES(n)	((1 << (n)) - 1)

/* Box info lookup and the per-box-type getdata/putdata/destroy/dump
   handlers registered in jp2_boxinfos[] below. */
jp2_boxinfo_t *jp2_boxinfolookup(int type);

static int jp2_getuint8(jas_stream_t *in, uint_fast8_t *val);
static int jp2_getuint16(jas_stream_t *in, uint_fast16_t *val);
static int jp2_getuint32(jas_stream_t *in, uint_fast32_t *val);
static int jp2_getuint64(jas_stream_t *in, uint_fast64_t *val);
static int jp2_putuint8(jas_stream_t *out, uint_fast8_t val);
static int jp2_putuint16(jas_stream_t *out, uint_fast16_t val);
static int jp2_putuint32(jas_stream_t *out, uint_fast32_t val);
static int jp2_putuint64(jas_stream_t *out, uint_fast64_t val);

static int jp2_getint(jas_stream_t *in, int s, int n, int_fast32_t *val);

jp2_box_t *jp2_box_get(jas_stream_t *in);
void jp2_box_dump(jp2_box_t *box, FILE *out);

static int jp2_jp_getdata(jp2_box_t *box, jas_stream_t *in);
static int jp2_jp_putdata(jp2_box_t *box, jas_stream_t *out);
static int jp2_ftyp_getdata(jp2_box_t *box, jas_stream_t *in);
static int jp2_ftyp_putdata(jp2_box_t *box, jas_stream_t *out);
static int jp2_ihdr_getdata(jp2_box_t *box, jas_stream_t *in);
static int jp2_ihdr_putdata(jp2_box_t *box, jas_stream_t *out);
static void jp2_bpcc_destroy(jp2_box_t *box);
static int jp2_bpcc_getdata(jp2_box_t *box, jas_stream_t *in);
static int jp2_bpcc_putdata(jp2_box_t *box, jas_stream_t *out);
static int jp2_colr_getdata(jp2_box_t *box, jas_stream_t *in);
static int jp2_colr_putdata(jp2_box_t *box, jas_stream_t *out);
static void jp2_colr_dumpdata(jp2_box_t *box, FILE *out);
static void jp2_colr_destroy(jp2_box_t *box);
static void jp2_cdef_destroy(jp2_box_t *box);
static int jp2_cdef_getdata(jp2_box_t *box, jas_stream_t *in);
static int jp2_cdef_putdata(jp2_box_t *box, jas_stream_t *out);
static void jp2_cdef_dumpdata(jp2_box_t *box, FILE *out);
static void jp2_cmap_destroy(jp2_box_t *box);
static int jp2_cmap_getdata(jp2_box_t *box, jas_stream_t *in);
static int jp2_cmap_putdata(jp2_box_t *box, jas_stream_t *out);
static void jp2_cmap_dumpdata(jp2_box_t *box, FILE *out);
static void jp2_pclr_destroy(jp2_box_t *box);
static int jp2_pclr_getdata(jp2_box_t *box, jas_stream_t *in);
static int jp2_pclr_putdata(jp2_box_t *box, jas_stream_t *out);
static void jp2_pclr_dumpdata(jp2_box_t *box, FILE *out);
/******************************************************************************\
* Local data.
\******************************************************************************/
/* Table of known JP2 box types and their handler vtables
   {create, destroy, getdata, putdata, dumpdata}; terminated by an
   all-zero sentinel entry. */
jp2_boxinfo_t jp2_boxinfos[] = {
	{JP2_BOX_JP, "JP", 0,
	  {0, 0, jp2_jp_getdata, jp2_jp_putdata, 0}},
	{JP2_BOX_FTYP, "FTYP", 0,
	  {0, 0, jp2_ftyp_getdata, jp2_ftyp_putdata, 0}},
	{JP2_BOX_JP2H, "JP2H", JP2_BOX_SUPER,
	  {0, 0, 0, 0, 0}},
	{JP2_BOX_IHDR, "IHDR", 0,
	  {0, 0, jp2_ihdr_getdata, jp2_ihdr_putdata, 0}},
	{JP2_BOX_BPCC, "BPCC", 0,
	  {0, jp2_bpcc_destroy, jp2_bpcc_getdata, jp2_bpcc_putdata, 0}},
	{JP2_BOX_COLR, "COLR", 0,
	  {0, jp2_colr_destroy, jp2_colr_getdata, jp2_colr_putdata, jp2_colr_dumpdata}},
	{JP2_BOX_PCLR, "PCLR", 0,
	  {0, jp2_pclr_destroy, jp2_pclr_getdata, jp2_pclr_putdata, jp2_pclr_dumpdata}},
	{JP2_BOX_CMAP, "CMAP", 0,
	  {0, jp2_cmap_destroy, jp2_cmap_getdata, jp2_cmap_putdata, jp2_cmap_dumpdata}},
	{JP2_BOX_CDEF, "CDEF", 0,
	  {0, jp2_cdef_destroy, jp2_cdef_getdata, jp2_cdef_putdata, jp2_cdef_dumpdata}},
	{JP2_BOX_RES, "RES", JP2_BOX_SUPER,
	  {0, 0, 0, 0, 0}},
	{JP2_BOX_RESC, "RESC", 0,
	  {0, 0, 0, 0, 0}},
	{JP2_BOX_RESD, "RESD", 0,
	  {0, 0, 0, 0, 0}},
	{JP2_BOX_JP2C, "JP2C", JP2_BOX_NODATA,
	  {0, 0, 0, 0, 0}},
	{JP2_BOX_JP2I, "JP2I", 0,
	  {0, 0, 0, 0, 0}},
	{JP2_BOX_XML, "XML", 0,
	  {0, 0, 0, 0, 0}},
	{JP2_BOX_UUID, "UUID", 0,
	  {0, 0, 0, 0, 0}},
	{JP2_BOX_UINF, "UINF", JP2_BOX_SUPER,
	  {0, 0, 0, 0, 0}},
	{JP2_BOX_ULST, "ULST", 0,
	  {0, 0, 0, 0, 0}},
	{JP2_BOX_URL, "URL", 0,
	  {0, 0, 0, 0, 0}},
	{0, 0, 0, {0, 0, 0, 0, 0}},
};

/* Fallback info record for unrecognised box types (all-null ops). */
jp2_boxinfo_t jp2_boxinfo_unk = {
	0, "Unknown", 0, {0, 0, 0, 0, 0}
};
/******************************************************************************\
* Box constructor.
\******************************************************************************/
/* Allocate and zero a new box of the given type.
   Returns the box, or a null pointer on failure. */
jp2_box_t *jp2_box_create(int type)
{
	jp2_box_t *box;
	jp2_boxinfo_t *boxinfo;

	if (!(box = jas_malloc(sizeof(jp2_box_t)))) {
		return 0;
	}
	memset(box, 0, sizeof(jp2_box_t));
	box->type = type;
	box->len = 0;
	if (!(boxinfo = jp2_boxinfolookup(type))) {
		/* Fix: release the freshly allocated box instead of
		   leaking it when the type lookup fails. */
		jas_free(box);
		return 0;
	}
	box->info = boxinfo;
	box->ops = &boxinfo->ops;
	return box;
}
/******************************************************************************\
* Box destructor.
\******************************************************************************/
/* Destroy a box: invoke the type-specific destroy hook (if any), then
   free the box itself. */
void jp2_box_destroy(jp2_box_t *box)
{
	if (box->ops->destroy) {
		(*box->ops->destroy)(box);
	}
	jas_free(box);
}

/* Free the bits-per-component array of a BPCC box. */
static void jp2_bpcc_destroy(jp2_box_t *box)
{
	jp2_bpcc_t *bpcc = &box->data.bpcc;
	if (bpcc->bpcs) {
		jas_free(bpcc->bpcs);
		bpcc->bpcs = 0;
	}
}

/* Free the channel-definition entries of a CDEF box. */
static void jp2_cdef_destroy(jp2_box_t *box)
{
	jp2_cdef_t *cdef = &box->data.cdef;
	if (cdef->ents) {
		jas_free(cdef->ents);
		cdef->ents = 0;
	}
}
/******************************************************************************\
* Box input.
\******************************************************************************/
/* Read the next box from a stream: parse the (possibly extended)
   header, copy the payload into a temporary memory stream, and hand it
   to the type-specific getdata handler.  Returns the box, or a null
   pointer on any error (the partially built box is destroyed). */
jp2_box_t *jp2_box_get(jas_stream_t *in)
{
	jp2_box_t *box;
	jp2_boxinfo_t *boxinfo;
	jas_stream_t *tmpstream;
	uint_fast32_t len;
	uint_fast64_t extlen;
	bool dataflag;

	box = 0;
	tmpstream = 0;

	if (!(box = jas_malloc(sizeof(jp2_box_t)))) {
		goto error;
	}

	// Mark the box data as never having been constructed
	// so that we will not errantly attempt to destroy it later.
	box->ops = &jp2_boxinfo_unk.ops;

	if (jp2_getuint32(in, &len) || jp2_getuint32(in, &box->type)) {
		goto error;
	}
	boxinfo = jp2_boxinfolookup(box->type);
	box->info = boxinfo;
	box->len = len;
	JAS_DBGLOG(10, (
	  "preliminary processing of JP2 box: type=%c%s%c (0x%08x); length=%d\n",
	  '"', boxinfo->name, '"', box->type, box->len
	  ));
	if (box->len == 1) {
		/* Extended (64-bit) length follows the 8-byte header. */
		if (jp2_getuint64(in, &extlen)) {
			goto error;
		}
		if (extlen > 0xffffffffUL) {
			jas_eprintf("warning: cannot handle large 64-bit box length\n");
			extlen = 0xffffffffUL;
		}
		box->len = extlen;
		box->datalen = extlen - JP2_BOX_HDRLEN(true);
	} else {
		box->datalen = box->len - JP2_BOX_HDRLEN(false);
	}
	/* NOTE(review): box->len == 0 ("box extends to end of file" per the
	   JP2 spec) passes the check below, but the unsigned subtraction
	   above has already wrapped datalen to a huge value; the stream
	   copy then fails at EOF.  Confirm this is the intended handling. */
	if (box->len != 0 && box->len < 8) {
		goto error;
	}

	dataflag = !(box->info->flags & (JP2_BOX_SUPER | JP2_BOX_NODATA));

	if (dataflag) {
		if (!(tmpstream = jas_stream_memopen(0, 0))) {
			goto error;
		}
		if (jas_stream_copy(tmpstream, in, box->datalen)) {
			jas_eprintf("cannot copy box data\n");
			goto error;
		}
		jas_stream_rewind(tmpstream);

		// From here onwards, the box data will need to be destroyed.
		// So, initialize the box operations.
		box->ops = &boxinfo->ops;

		if (box->ops->getdata) {
			if ((*box->ops->getdata)(box, tmpstream)) {
				jas_eprintf("cannot parse box data\n");
				goto error;
			}
		}
		jas_stream_close(tmpstream);
	}

	if (jas_getdbglevel() >= 1) {
		jp2_box_dump(box, stderr);
	}

	return box;

error:
	if (box) {
		jp2_box_destroy(box);
	}
	if (tmpstream) {
		jas_stream_close(tmpstream);
	}
	return 0;
}
/* Print a human-readable summary of a box (type, length, and the
   type-specific payload dump, if a handler exists). */
void jp2_box_dump(jp2_box_t *box, FILE *out)
{
	jp2_boxinfo_t *boxinfo;
	boxinfo = jp2_boxinfolookup(box->type);
	assert(boxinfo);

	fprintf(out, "JP2 box: ");
	fprintf(out, "type=%c%s%c (0x%08"PRIxFAST32"); length=%"PRIuFAST32"\n", '"',
	  boxinfo->name, '"', box->type, box->len);
	if (box->ops->dumpdata) {
		(*box->ops->dumpdata)(box, out);
	}
}

/* Parse the JP signature box payload (the 4-byte magic). */
static int jp2_jp_getdata(jp2_box_t *box, jas_stream_t *in)
{
	jp2_jp_t *jp = &box->data.jp;
	if (jp2_getuint32(in, &jp->magic)) {
		return -1;
	}
	return 0;
}
/* Parse a file-type (FTYP) box: major/minor version followed by the
   compatibility-code list.  The list length is derived from datalen;
   an oversized (or datalen<8 wrapped) count is rejected by the
   JP2_FTYP_MAXCOMPATCODES bound. */
static int jp2_ftyp_getdata(jp2_box_t *box, jas_stream_t *in)
{
	jp2_ftyp_t *ftyp = &box->data.ftyp;
	unsigned int i;
	if (jp2_getuint32(in, &ftyp->majver) || jp2_getuint32(in, &ftyp->minver)) {
		return -1;
	}
	ftyp->numcompatcodes = (box->datalen - 8) / 4;
	if (ftyp->numcompatcodes > JP2_FTYP_MAXCOMPATCODES) {
		return -1;
	}
	for (i = 0; i < ftyp->numcompatcodes; ++i) {
		if (jp2_getuint32(in, &ftyp->compatcodes[i])) {
			return -1;
		}
	}
	return 0;
}

/* Parse an image-header (IHDR) box: dimensions, component count,
   bit depth, compression type, and the colourspace/IPR flags. */
static int jp2_ihdr_getdata(jp2_box_t *box, jas_stream_t *in)
{
	jp2_ihdr_t *ihdr = &box->data.ihdr;
	if (jp2_getuint32(in, &ihdr->height) || jp2_getuint32(in, &ihdr->width) ||
	  jp2_getuint16(in, &ihdr->numcmpts) || jp2_getuint8(in, &ihdr->bpc) ||
	  jp2_getuint8(in, &ihdr->comptype) || jp2_getuint8(in, &ihdr->csunk) ||
	  jp2_getuint8(in, &ihdr->ipr)) {
		return -1;
	}
	return 0;
}
/* Parse a bits-per-component (BPCC) box: one byte per component; the
   component count is the full payload length. */
static int jp2_bpcc_getdata(jp2_box_t *box, jas_stream_t *in)
{
	jp2_bpcc_t *bpcc = &box->data.bpcc;
	unsigned int i;
	bpcc->numcmpts = box->datalen;
	if (!(bpcc->bpcs = jas_alloc2(bpcc->numcmpts, sizeof(uint_fast8_t)))) {
		return -1;
	}
	for (i = 0; i < bpcc->numcmpts; ++i) {
		if (jp2_getuint8(in, &bpcc->bpcs[i])) {
			return -1;
		}
	}
	return 0;
}

/* Dump a colour-specification (COLR) box payload. */
static void jp2_colr_dumpdata(jp2_box_t *box, FILE *out)
{
	jp2_colr_t *colr = &box->data.colr;
	fprintf(out, "method=%d; pri=%d; approx=%d\n", (int)colr->method, (int)colr->pri, (int)colr->approx);
	switch (colr->method) {
	case JP2_COLR_ENUM:
		fprintf(out, "csid=%d\n", (int)colr->csid);
		break;
	case JP2_COLR_ICC:
		jas_memdump(out, colr->iccp, colr->iccplen);
		break;
	}
}
/* Parse a colour-specification (COLR) box: method/precedence/approx
   bytes, followed by either an enumerated colourspace id or an
   embedded ICC profile. */
static int jp2_colr_getdata(jp2_box_t *box, jas_stream_t *in)
{
	jp2_colr_t *colr = &box->data.colr;
	colr->csid = 0;
	colr->iccp = 0;
	colr->iccplen = 0;

	if (jp2_getuint8(in, &colr->method) || jp2_getuint8(in, &colr->pri) ||
	  jp2_getuint8(in, &colr->approx)) {
		return -1;
	}
	switch (colr->method) {
	case JP2_COLR_ENUM:
		if (jp2_getuint32(in, &colr->csid)) {
			return -1;
		}
		break;
	case JP2_COLR_ICC:
		/* Fix: the payload must hold at least the three header
		   bytes already consumed.  Without this check the
		   unsigned subtraction below wraps around for
		   datalen < 3, producing a huge allocation/read size. */
		if (box->datalen < 3) {
			return -1;
		}
		colr->iccplen = box->datalen - 3;
		if (!(colr->iccp = jas_alloc2(colr->iccplen, sizeof(uint_fast8_t)))) {
			return -1;
		}
		if (jas_stream_read(in, colr->iccp, colr->iccplen) != colr->iccplen) {
			return -1;
		}
		break;
	}
	return 0;
}
static void jp2_cdef_dumpdata(jp2_box_t *box, FILE *out)
{
jp2_cdef_t *cdef = &box->data.cdef;
unsigned int i;
for (i = 0; i < cdef->numchans; ++i) {
fprintf(out,
"channo=%"PRIuFAST16"; type=%"PRIuFAST16"; assoc=%"PRIuFAST16"\n",
cdef->ents[i].channo, cdef->ents[i].type, cdef->ents[i].assoc);
}
}
/* Release the resources owned by a colr box: the ICC profile buffer,
 * if one was ever allocated by jp2_colr_getdata(). */
static void jp2_colr_destroy(jp2_box_t *box)
{
	jp2_colr_t *colr = &box->data.colr;

	if (colr->iccp != 0) {
		jas_free(colr->iccp);
	}
}
/* Parse a channel definition (cdef) box.
 * Reads the entry count, allocates cdef->ents, then reads one
 * (channo, type, assoc) triple of 16-bit fields per entry.
 * Returns 0 on success, -1 on error.
 *
 * NOTE(review): if numchans is 0, jas_alloc2(0, ...) may return a null
 * pointer and a structurally valid empty box would be rejected — confirm
 * jas_alloc2's zero-size behavior.  On a mid-loop read failure, ents is
 * left allocated; presumably the box destructor frees it — verify. */
static int jp2_cdef_getdata(jp2_box_t *box, jas_stream_t *in)
{
	jp2_cdef_t *cdef = &box->data.cdef;
	jp2_cdefchan_t *chan;
	unsigned int channo;
	if (jp2_getuint16(in, &cdef->numchans)) {
		return -1;
	}
	if (!(cdef->ents = jas_alloc2(cdef->numchans, sizeof(jp2_cdefchan_t)))) {
		return -1;
	}
	for (channo = 0; channo < cdef->numchans; ++channo) {
		chan = &cdef->ents[channo];
		/* Three 16-bit fields per channel definition. */
		if (jp2_getuint16(in, &chan->channo) || jp2_getuint16(in, &chan->type) ||
		  jp2_getuint16(in, &chan->assoc)) {
			return -1;
		}
	}
	return 0;
}
/******************************************************************************\
* Box output.
\******************************************************************************/
/* Serialize a JP2 box (header plus payload) to a stream.
 * For boxes that carry data, the payload is first rendered into a
 * temporary memory stream so its length can be measured before the
 * header is written.  Returns 0 on success, -1 on error. */
int jp2_box_put(jp2_box_t *box, jas_stream_t *out)
{
	jas_stream_t *tmpstream;
	bool extlen;
	bool dataflag;
	tmpstream = 0;
	/* Superboxes and data-less boxes have no payload to render. */
	dataflag = !(box->info->flags & (JP2_BOX_SUPER | JP2_BOX_NODATA));
	if (dataflag) {
		if (!(tmpstream = jas_stream_memopen(0, 0))) {
			goto error;
		}
		if (box->ops->putdata) {
			if ((*box->ops->putdata)(box, tmpstream)) {
				goto error;
			}
		}
		/* Total box length = payload size + (short) header size. */
		box->len = jas_stream_tell(tmpstream) + JP2_BOX_HDRLEN(false);
		jas_stream_rewind(tmpstream);
	}
	/* Lengths >= 2^32 need the extended (64-bit) length field; the
	 * 32-bit length slot then holds the sentinel value 1.
	 * NOTE(review): box->len is computed with JP2_BOX_HDRLEN(false)
	 * even when the extended header is emitted — confirm intended. */
	extlen = (box->len >= (((uint_fast64_t)1) << 32)) != 0;
	if (jp2_putuint32(out, extlen ? 1 : box->len)) {
		goto error;
	}
	if (jp2_putuint32(out, box->type)) {
		goto error;
	}
	if (extlen) {
		if (jp2_putuint64(out, box->len)) {
			goto error;
		}
	}
	if (dataflag) {
		/* Copy the pre-rendered payload after the header. */
		if (jas_stream_copy(out, tmpstream, box->len - JP2_BOX_HDRLEN(false))) {
			goto error;
		}
		jas_stream_close(tmpstream);
	}
	return 0;

error:
	if (tmpstream) {
		jas_stream_close(tmpstream);
	}
	return -1;
}
/* Write the payload of a JP2 signature (jp) box: a single magic word. */
static int jp2_jp_putdata(jp2_box_t *box, jas_stream_t *out)
{
	return jp2_putuint32(out, box->data.jp.magic) ? -1 : 0;
}
/* Write the payload of a file type (ftyp) box: major/minor version
 * followed by the compatibility code list. */
static int jp2_ftyp_putdata(jp2_box_t *box, jas_stream_t *out)
{
	jp2_ftyp_t *ftyp = &box->data.ftyp;
	unsigned int codeno;

	if (jp2_putuint32(out, ftyp->majver)) {
		return -1;
	}
	if (jp2_putuint32(out, ftyp->minver)) {
		return -1;
	}
	for (codeno = 0; codeno < ftyp->numcompatcodes; ++codeno) {
		if (jp2_putuint32(out, ftyp->compatcodes[codeno])) {
			return -1;
		}
	}
	return 0;
}
/* Write the payload of an image header (ihdr) box, mirroring the
 * field order read by jp2_ihdr_getdata(). */
static int jp2_ihdr_putdata(jp2_box_t *box, jas_stream_t *out)
{
	jp2_ihdr_t *ihdr = &box->data.ihdr;

	if (jp2_putuint32(out, ihdr->height)) {
		return -1;
	}
	if (jp2_putuint32(out, ihdr->width)) {
		return -1;
	}
	if (jp2_putuint16(out, ihdr->numcmpts)) {
		return -1;
	}
	if (jp2_putuint8(out, ihdr->bpc)) {
		return -1;
	}
	if (jp2_putuint8(out, ihdr->comptype)) {
		return -1;
	}
	if (jp2_putuint8(out, ihdr->csunk)) {
		return -1;
	}
	if (jp2_putuint8(out, ihdr->ipr)) {
		return -1;
	}
	return 0;
}
/* Write the payload of a bits-per-component (bpcc) box: one byte per
 * component. */
static int jp2_bpcc_putdata(jp2_box_t *box, jas_stream_t *out)
{
	jp2_bpcc_t *bpcc = &box->data.bpcc;
	unsigned int cmptno;

	for (cmptno = 0; cmptno < bpcc->numcmpts; ++cmptno) {
		if (jp2_putuint8(out, bpcc->bpcs[cmptno])) {
			return -1;
		}
	}
	return 0;
}
/* Write the payload of a colour specification (colr) box. */
static int jp2_colr_putdata(jp2_box_t *box, jas_stream_t *out)
{
	jp2_colr_t *colr = &box->data.colr;

	if (jp2_putuint8(out, colr->method)) {
		return -1;
	}
	if (jp2_putuint8(out, colr->pri)) {
		return -1;
	}
	if (jp2_putuint8(out, colr->approx)) {
		return -1;
	}
	if (colr->method == JP2_COLR_ENUM) {
		/* Enumerated colour space: 32-bit colour space ID. */
		if (jp2_putuint32(out, colr->csid)) {
			return -1;
		}
	} else if (colr->method == JP2_COLR_ICC) {
		/* Embedded ICC profile: raw profile bytes. */
		if (jas_stream_write(out, colr->iccp,
		  JAS_CAST(int, colr->iccplen)) != JAS_CAST(int, colr->iccplen)) {
			return -1;
		}
	}
	return 0;
}
/* Write the payload of a channel definition (cdef) box: the channel
 * count followed by one (channo, type, assoc) triple per channel. */
static int jp2_cdef_putdata(jp2_box_t *box, jas_stream_t *out)
{
	jp2_cdef_t *cdef = &box->data.cdef;
	unsigned int chanind;

	if (jp2_putuint16(out, cdef->numchans)) {
		return -1;
	}
	for (chanind = 0; chanind < cdef->numchans; ++chanind) {
		const jp2_cdefchan_t *ent = &cdef->ents[chanind];

		if (jp2_putuint16(out, ent->channo)) {
			return -1;
		}
		if (jp2_putuint16(out, ent->type)) {
			return -1;
		}
		if (jp2_putuint16(out, ent->assoc)) {
			return -1;
		}
	}
	return 0;
}
/******************************************************************************\
* Input operations for primitive types.
\******************************************************************************/
/* Read one unsigned byte from a stream.
 * A null val pointer discards the byte (used to skip fields).
 * Returns 0 on success, -1 on EOF. */
static int jp2_getuint8(jas_stream_t *in, uint_fast8_t *val)
{
	int byte = jas_stream_getc(in);

	if (byte == EOF) {
		return -1;
	}
	if (val) {
		*val = byte;
	}
	return 0;
}
/* Read a 16-bit big-endian unsigned integer from a stream.
 * A null val pointer discards the value.  Returns 0 or -1 on EOF. */
static int jp2_getuint16(jas_stream_t *in, uint_fast16_t *val)
{
	uint_fast16_t v = 0;
	int i;

	for (i = 0; i < 2; ++i) {
		int c = jas_stream_getc(in);

		if (c == EOF) {
			return -1;
		}
		v = (v << 8) | c;
	}
	if (val) {
		*val = v;
	}
	return 0;
}
/* Read a 32-bit big-endian unsigned integer from a stream.
 * A null val pointer discards the value.  Returns 0 or -1 on EOF. */
static int jp2_getuint32(jas_stream_t *in, uint_fast32_t *val)
{
	uint_fast32_t v = 0;
	int i;

	for (i = 0; i < 4; ++i) {
		int c = jas_stream_getc(in);

		if (c == EOF) {
			return -1;
		}
		v = (v << 8) | c;
	}
	if (val) {
		*val = v;
	}
	return 0;
}
/* Read a 64-bit big-endian unsigned integer from a stream.
 * Returns 0 on success, -1 on EOF.
 *
 * Fix: every sibling getter (jp2_getuint8/16/32) treats a null val
 * pointer as "read and discard", but this one dereferenced val
 * unconditionally — a caller skipping a 64-bit field the way it skips
 * smaller fields would hit a null-pointer dereference.  Guard the
 * store for consistency. */
static int jp2_getuint64(jas_stream_t *in, uint_fast64_t *val)
{
	uint_fast64_t tmpval;
	int i;
	int c;

	tmpval = 0;
	for (i = 0; i < 8; ++i) {
		tmpval <<= 8;
		if ((c = jas_stream_getc(in)) == EOF) {
			return -1;
		}
		tmpval |= (c & 0xff);
	}
	if (val) {
		*val = tmpval;
	}
	return 0;
}
/******************************************************************************\
* Output operations for primitive types.
\******************************************************************************/
/* Write one unsigned byte to a stream.  Returns 0 on success, -1 on error. */
static int jp2_putuint8(jas_stream_t *out, uint_fast8_t val)
{
	return (jas_stream_putc(out, val & 0xff) == EOF) ? -1 : 0;
}
/* Write a 16-bit unsigned integer in big-endian byte order.
 * Returns 0 on success, -1 on error. */
static int jp2_putuint16(jas_stream_t *out, uint_fast16_t val)
{
	int shift;

	for (shift = 8; shift >= 0; shift -= 8) {
		if (jas_stream_putc(out, (val >> shift) & 0xff) == EOF) {
			return -1;
		}
	}
	return 0;
}
/* Write a 32-bit unsigned integer in big-endian byte order.
 * Returns 0 on success, -1 on error. */
static int jp2_putuint32(jas_stream_t *out, uint_fast32_t val)
{
	int shift;

	for (shift = 24; shift >= 0; shift -= 8) {
		if (jas_stream_putc(out, (val >> shift) & 0xff) == EOF) {
			return -1;
		}
	}
	return 0;
}
/* Write a 64-bit unsigned integer in big-endian byte order, as two
 * 32-bit halves.  Returns 0 on success, -1 on error. */
static int jp2_putuint64(jas_stream_t *out, uint_fast64_t val)
{
	if (jp2_putuint32(out, (val >> 32) & 0xffffffffUL)) {
		return -1;
	}
	if (jp2_putuint32(out, val & 0xffffffffUL)) {
		return -1;
	}
	return 0;
}
/******************************************************************************\
* Miscellaneous code.
\******************************************************************************/
jp2_boxinfo_t *jp2_boxinfolookup(int type)
{
jp2_boxinfo_t *boxinfo;
for (boxinfo = jp2_boxinfos; boxinfo->name; ++boxinfo) {
if (boxinfo->type == type) {
return boxinfo;
}
}
return &jp2_boxinfo_unk;
}
/* Release the entry array owned by a component mapping (cmap) box. */
static void jp2_cmap_destroy(jp2_box_t *box)
{
	jp2_cmap_t *cmap = &box->data.cmap;

	if (cmap->ents != 0) {
		jas_free(cmap->ents);
	}
}
/* Parse a component mapping (cmap) box.
 * Each entry is 4 bytes (16-bit component number, 8-bit mapping type,
 * 8-bit palette column), so the entry count is datalen / 4.
 * Returns 0 on success, -1 on error.
 *
 * NOTE(review): datalen not divisible by 4 silently ignores trailing
 * bytes, and datalen < 4 yields numchans == 0 (jas_alloc2(0, ...) may
 * then return null and fail the box) — confirm intended handling. */
static int jp2_cmap_getdata(jp2_box_t *box, jas_stream_t *in)
{
	jp2_cmap_t *cmap = &box->data.cmap;
	jp2_cmapent_t *ent;
	unsigned int i;
	cmap->numchans = (box->datalen) / 4;
	if (!(cmap->ents = jas_alloc2(cmap->numchans, sizeof(jp2_cmapent_t)))) {
		return -1;
	}
	for (i = 0; i < cmap->numchans; ++i) {
		ent = &cmap->ents[i];
		if (jp2_getuint16(in, &ent->cmptno) ||
		  jp2_getuint8(in, &ent->map) ||
		  jp2_getuint8(in, &ent->pcol)) {
			return -1;
		}
	}
	return 0;
}
/* Writing cmap boxes is not implemented; always fails. */
static int jp2_cmap_putdata(jp2_box_t *box, jas_stream_t *out)
{
	/* Eliminate compiler warning about unused variables. */
	(void)box;
	(void)out;
	return -1;
}
/* Dump every entry of a component mapping (cmap) box. */
static void jp2_cmap_dumpdata(jp2_box_t *box, FILE *out)
{
	jp2_cmap_t *cmap = &box->data.cmap;
	unsigned int chanind;

	fprintf(out, "numchans = %d\n", (int) cmap->numchans);
	for (chanind = 0; chanind < cmap->numchans; ++chanind) {
		const jp2_cmapent_t *ent = &cmap->ents[chanind];

		fprintf(out, "cmptno=%d; map=%d; pcol=%d\n",
		  (int) ent->cmptno, (int) ent->map, (int) ent->pcol);
	}
}
/* Release the resources owned by a palette (pclr) box: the LUT data
 * and the per-channel bit-depth array. */
static void jp2_pclr_destroy(jp2_box_t *box)
{
	jp2_pclr_t *pclr = &box->data.pclr;

	if (pclr->lutdata != 0) {
		jas_free(pclr->lutdata);
	}
	if (pclr->bpc != 0) {
		jas_free(pclr->bpc);
	}
}
/* Parse a palette (pclr) box: entry/channel counts, per-channel bit
 * depths, then the LUT values (signed or unsigned per the high bit of
 * each channel's bpc byte).  Returns 0 on success, -1 on error.
 *
 * Fix: pclr->bpc was never initialized before the first early return.
 * If e.g. the lutdata allocation failed, jp2_pclr_destroy() would call
 * jas_free() on an uninitialized (wild) pointer — an exploitable
 * invalid free on crafted input (fixed upstream in JasPer; see
 * CVE-2016-8690 family of uninitialized-data bugs). */
static int jp2_pclr_getdata(jp2_box_t *box, jas_stream_t *in)
{
	jp2_pclr_t *pclr = &box->data.pclr;
	int lutsize;
	unsigned int i;
	unsigned int j;
	int_fast32_t x;

	/* Ensure the destructor sees well-defined pointers even if we
	   bail out before the corresponding allocation succeeds. */
	pclr->lutdata = 0;
	pclr->bpc = 0;

	if (jp2_getuint16(in, &pclr->numlutents) ||
	  jp2_getuint8(in, &pclr->numchans)) {
		return -1;
	}
	lutsize = pclr->numlutents * pclr->numchans;
	if (!(pclr->lutdata = jas_alloc2(lutsize, sizeof(int_fast32_t)))) {
		return -1;
	}
	if (!(pclr->bpc = jas_alloc2(pclr->numchans, sizeof(uint_fast8_t)))) {
		return -1;
	}
	for (i = 0; i < pclr->numchans; ++i) {
		if (jp2_getuint8(in, &pclr->bpc[i])) {
			return -1;
		}
	}
	for (i = 0; i < pclr->numlutents; ++i) {
		for (j = 0; j < pclr->numchans; ++j) {
			/* Bit 7 of bpc selects signedness; low 7 bits give
			   the bit depth minus one. */
			if (jp2_getint(in, (pclr->bpc[j] & 0x80) != 0,
			  (pclr->bpc[j] & 0x7f) + 1, &x)) {
				return -1;
			}
			pclr->lutdata[i * pclr->numchans + j] = x;
		}
	}
	return 0;
}
/* Writing pclr boxes is not implemented; always fails. */
static int jp2_pclr_putdata(jp2_box_t *box, jas_stream_t *out)
{
	/* Eliminate warning about unused variable. */
	(void)box;
	(void)out;
	return -1;
}
/* Dump the LUT of a palette (pclr) box.
 *
 * Fix: `i` is unsigned int but was printed with %d (undefined per the
 * C standard's printf contract and flagged by -Wformat); cast it to
 * int like the other dumped counters. */
static void jp2_pclr_dumpdata(jp2_box_t *box, FILE *out)
{
	jp2_pclr_t *pclr = &box->data.pclr;
	unsigned int i;
	int j;

	fprintf(out, "numents=%d; numchans=%d\n", (int) pclr->numlutents,
	  (int) pclr->numchans);
	for (i = 0; i < pclr->numlutents; ++i) {
		for (j = 0; j < pclr->numchans; ++j) {
			fprintf(out, "LUT[%d][%d]=%"PRIiFAST32"\n", (int) i, j,
			  pclr->lutdata[i * pclr->numchans + j]);
		}
	}
}
/* Read an n-bit big-endian integer from a stream; signed when s != 0.
 * The value occupies ceil(n/8) whole bytes on disk and is masked down
 * to n bits.  Returns 0 on success, -1 on I/O failure. */
static int jp2_getint(jas_stream_t *in, int s, int n, int_fast32_t *val)
{
	int c;
	int i;
	uint_fast32_t v;
	int m;
	/* Number of whole bytes holding the n-bit value. */
	m = (n + 7) / 8;
	v = 0;
	for (i = 0; i < m; ++i) {
		if ((c = jas_stream_getc(in)) == EOF) {
			return -1;
		}
		v = (v << 8) | c;
	}
	/* Keep only the low n bits. */
	v &= ONES(n);
	if (s) {
		int sb;
		/* Sign bit of the m-byte representation. */
		sb = v & (1 << (8 * m - 1));
		/* Two's-complement style decode: take the negation of v
		 * within the m-byte width, then apply the sign.
		 * NOTE(review): the intent appears to be sign extension of
		 * an n-bit field; confirm against the JP2 pclr field
		 * definition before altering this arithmetic. */
		*val = ((~v) + 1) & ONES(8 * m);
		if (sb) {
			*val = -*val;
		}
	} else {
		*val = v;
	}
	return 0;
}
/* Find the channel-definition entry whose channel number equals
 * channo.  Returns the entry, or a null pointer if none matches. */
jp2_cdefchan_t *jp2_cdef_lookup(jp2_cdef_t *cdef, int channo)
{
	unsigned int i;

	for (i = 0; i < cdef->numchans; ++i) {
		jp2_cdefchan_t *ent = &cdef->ents[i];

		if (ent->channo == JAS_CAST(unsigned int, channo)) {
			return ent;
		}
	}
	return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3223_1 |
crossvul-cpp_data_bad_5488_0 | 404: Not Found | ./CrossVul/dataset_final_sorted/CWE-476/c/bad_5488_0 |
crossvul-cpp_data_bad_623_0 | /*
* TUN - Universal TUN/TAP device driver.
* Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
*/
/*
* Changes:
*
* Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
* Add TUNSETLINK ioctl to set the link encapsulation
*
* Mark Smith <markzzzsmith@yahoo.com.au>
* Use eth_random_addr() for tap MAC address.
*
* Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
* Fixes in packet dropping, queue length setting and queue wakeup.
* Increased default tx queue length.
* Added ethtool API.
* Minor cleanups
*
* Daniel Podlejski <underley@underley.eu.org>
* Modifications for 2.3.99-pre5 kernel.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "tun"
#define DRV_VERSION "1.6"
#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
#define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/uaccess.h>
/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */
#ifdef TUN_DEBUG
static int debug;
#define tun_debug(level, tun, fmt, args...) \
do { \
if (tun->debug) \
netdev_printk(level, tun->dev, fmt, ##args); \
} while (0)
#define DBG1(level, fmt, args...) \
do { \
if (debug == 2) \
printk(level fmt, ##args); \
} while (0)
#else
#define tun_debug(level, tun, fmt, args...) \
do { \
if (0) \
netdev_printk(level, tun->dev, fmt, ##args); \
} while (0)
#define DBG1(level, fmt, args...) \
do { \
if (0) \
printk(level fmt, ##args); \
} while (0)
#endif
#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
/* TUN device flags */
/* IFF_ATTACH_QUEUE is never stored in device flags,
* overload it to mean fasync when stored there.
*/
#define TUN_FASYNC IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE 0x80000000
#define TUN_VNET_BE 0x40000000
#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
IFF_MULTI_QUEUE)
#define GOODCOPY_LEN 128
#define FLT_EXACT_COUNT 8
/* MAC receive filter state, programmed from user space (TUNSETTXFILTER
 * path: see update_filter()). */
struct tap_filter {
	unsigned int count;    /* Number of addrs. Zero means disabled */
	u32 mask[2];           /* Mask of the hashed addrs */
	unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];  /* exact-match table */
};
/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
* to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS 4096
#define TUN_FLOW_EXPIRE (3 * HZ)
/* Per-CPU traffic counters; the u64 fields are read under syncp
 * (see tun_net_get_stats64()), the u32 drop/error counters are not. */
struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};
/* A tun_file connects an open character device to a tuntap netdevice. It
* also contains all socket related structures (except sock_fprog and tap_filter)
* to serve as one transmit queue for tuntap device. The sock_fprog and
* tap_filter were kept in tun_struct since they were used for filtering for the
* netdevice not for a specific queue (at least I didn't see the requirement for
* this).
*
* RCU usage:
* The tun_file and tun_struct are loosely coupled, the pointer from one to the
* other can only be read while rcu_read_lock or rtnl_lock is held.
*/
struct tun_file {
	struct sock sk;                 /* must be first: cast target */
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;   /* device this queue is attached to */
	struct fasync_struct *fasync;
	/* only used for fasnyc */
	unsigned int flags;
	union {
		u16 queue_index;        /* slot in tun->tfiles while attached */
		unsigned int ifindex;   /* requested ifindex before attach */
	};
	struct list_head next;          /* link on tun->disabled when detached */
	struct tun_struct *detached;    /* non-NULL while on the disabled list */
	struct skb_array tx_array;      /* per-queue tx ring drained by readers */
};
/* One rxhash -> queue mapping, hashed into tun->flows and reclaimed by
 * the flow GC timer (tun_flow_cleanup()). */
struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;
	u32 rxhash;        /* key: symmetric flow hash */
	u32 rps_rxhash;    /* last hash recorded for RPS */
	int queue_index;
	unsigned long updated;  /* jiffies of last refresh, for aging */
};
#define TUN_NUM_FLOW_ENTRIES 1024
/* Since the socket were moved to tun_file, to preserve the behavior of persist
* device, socket filter, sndbuf and vnet header size were restore when the
* file were attached to a persist device.
*/
struct tun_struct {
	struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];  /* attached queues */
	unsigned int numqueues;        /* live entries in tfiles[] */
	unsigned int flags;            /* IFF_* device flags */
	kuid_t owner;                  /* restrict attach to this uid/gid */
	kgid_t group;

	struct net_device *dev;
	netdev_features_t set_features;  /* user-toggled subset of TUN_USER_FEATURES */
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int align;                     /* requested headroom (tun_set_headroom) */
	int vnet_hdr_sz;
	int sndbuf;
	struct tap_filter txflt;
	struct sock_fprog fprog;
	/* protected by rtnl lock */
	bool filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;               /* guards the flow table below */
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;      /* queues parked on the disabled list */
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;
};
#ifdef CONFIG_TUN_VNET_CROSS_LE
/* Cross-endian support enabled: TUN_VNET_BE can force big-endian
 * interpretation of legacy (pre-virtio-1.0) vnet headers. */
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

/* TUNGETVNETBE: report whether big-endian vnet headers are forced. */
static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

/* TUNSETVNETBE: set/clear the forced big-endian flag. */
static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
/* Cross-endian support compiled out: native legacy endianness only,
 * and the TUNGETVNETBE/TUNSETVNETBE ioctls are rejected. */
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */
static inline bool tun_is_little_endian(struct tun_struct *tun)
{
return tun->flags & TUN_VNET_LE ||
tun_legacy_is_little_endian(tun);
}
/* Convert a 16-bit vnet-header field to host byte order. */
static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	bool le = tun_is_little_endian(tun);

	return __virtio16_to_cpu(le, val);
}
/* Convert a host-order 16-bit value to vnet-header byte order. */
static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	bool le = tun_is_little_endian(tun);

	return __cpu_to_virtio16(le, val);
}
/* Map an rxhash to a bucket index of the flow table.
 *
 * Consistency fix: the mask was the magic constant 0x3ff, which is
 * only correct because TUN_NUM_FLOW_ENTRIES happens to be 1024.  Tie
 * the mask to the table size so the two cannot drift apart (value is
 * bit-for-bit identical today). */
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & (TUN_NUM_FLOW_ENTRIES - 1);
}
/* Find the flow entry with the given rxhash in one hash bucket.
 * Caller must hold rcu_read_lock() or tun->lock.  Returns NULL if the
 * flow is not present. */
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}
/* Allocate and insert a new flow entry mapping rxhash -> queue_index.
 * Caller holds tun->lock.  Returns the new entry, or NULL if the
 * GFP_ATOMIC allocation fails (lookup simply misses in that case). */
static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}
/* Unlink a flow entry and free it after an RCU grace period.
 * Caller holds tun->lock. */
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}
/* Drop every entry in the flow table (e.g. on filter/steering reset). */
static void tun_flow_flush(struct tun_struct *tun)
{
	int bucket;

	spin_lock_bh(&tun->lock);
	for (bucket = 0; bucket < TUN_NUM_FLOW_ENTRIES; bucket++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[bucket], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}
/* Drop every flow entry that steers traffic to the given queue
 * (called when a queue is detached). */
static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int bucket;

	spin_lock_bh(&tun->lock);
	for (bucket = 0; bucket < TUN_NUM_FLOW_ENTRIES; bucket++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[bucket], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}
/* Flow GC timer: age out entries idle for longer than ageing_time and
 * re-arm the timer for the earliest remaining expiry. */
static void tun_flow_cleanup(unsigned long data)
{
	struct tun_struct *tun = (struct tun_struct *)data;
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			count++;
			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies))
				tun_flow_delete(tun, e);
			else if (time_before(this_timer, next_timer))
				/* Track the soonest future expiry. */
				next_timer = this_timer;
		}
	}

	/* Only re-arm while entries remain; otherwise the next
	 * tun_flow_update() restarts the timer. */
	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock_bh(&tun->lock);
}
/* Record (on the receive path) that flow rxhash last came in on
 * tfile's queue, creating or refreshing the flow-table entry so
 * tun_select_queue() can steer the reverse traffic. */
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	/* We may get a very small possibility of OOO during switching, not
	 * worth to optimize.*/
	if (tun->numqueues == 1 || tfile->detached)
		goto unlock;

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		/* Slow path: take the lock, re-check, then insert. */
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		/* Make sure the GC timer is running to age this entry. */
		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

unlock:
	rcu_read_unlock();
}
/**
* Save the hash received in the stack receive path and update the
* flow_hash table accordingly.
*/
/* Cache the most recent RPS hash on the flow entry, avoiding the
 * store when it is already current. */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (e->rps_rxhash == hash)
		return;

	e->rps_rxhash = hash;
}
/* We try to identify a flow through its rxhash first. The reason that
* we do not check rxq no. is because some cards(e.g 82599), chooses
* the rxq based on the txq where the last packet of the flow comes. As
* the userspace application move between processors, we may get a
* different rxq no. here. If we could not get rxhash, then we would
* hope the rxq no. may help here.
*/
/* ndo_select_queue: pick the tx queue for skb.  Prefer the queue the
 * flow table learned on the receive path; otherwise scale the hash
 * into the queue range, or fall back to the recorded rx queue. */
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	rcu_read_lock();
	numqueues = ACCESS_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			/* Learned flow: reuse its queue, refresh RPS hash. */
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	rcu_read_unlock();
	return txq;
}
/* True when the caller is neither the configured owner/group of the
 * device nor CAP_NET_ADMIN-capable in the device's netns. */
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);
	bool mismatch;

	mismatch = (uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		   (gid_valid(tun->group) && !in_egroup_p(tun->group));

	return mismatch && !ns_capable(net->user_ns, CAP_NET_ADMIN);
}
/* Publish the current attached-queue count to the net core. */
static void tun_set_real_num_queues(struct tun_struct *tun)
{
	unsigned int n = tun->numqueues;

	netif_set_real_num_tx_queues(tun->dev, n);
	netif_set_real_num_rx_queues(tun->dev, n);
}
/* Park a queue on the device's disabled list (IFF_MULTI_QUEUE detach
 * without close); the file keeps a back-pointer via ->detached. */
static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}
/* Take a queue off the disabled list; returns the device it was
 * parked on so the caller can finish re-attaching or releasing it. */
static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}
/* Drop everything pending on a queue: the tx ring plus the socket's
 * write and error queues. */
static void tun_queue_purge(struct tun_file *tfile)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_array_consume(&tfile->tx_array);
		if (skb == NULL)
			break;
		kfree_skb(skb);
	}

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}
/* Detach a queue from its device (rtnl held).  With clean=false the
 * queue is only parked on the disabled list (IFF_MULTI_QUEUE); with
 * clean=true it is fully released and the device may be unregistered
 * when the last queue goes away. */
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		/* Fill the vacated slot with the last queue. */
		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		/* Wait out readers before purging state they may touch. */
		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		/* Fully releasing a previously parked queue. */
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			/* Non-persistent device dies with its last queue. */
			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			skb_array_cleanup(&tfile->tx_array);
		sock_put(&tfile->sk);
	}
}
/* rtnl-taking wrapper around __tun_detach(). */
static void tun_detach(struct tun_file *tfile, bool clean)
{
	rtnl_lock();
	__tun_detach(tfile, clean);
	rtnl_unlock();
}
/* Detach and release every queue (attached and disabled) of a device;
 * called from ndo_uninit with rtnl held. */
static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	/* Phase 1: unhook every queue and wake any blocked readers. */
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	/* Phase 2: after the grace period, purge and drop references. */
	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		/* Drop read queue */
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (xdp_prog)
		bpf_prog_put(xdp_prog);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}
/* Attach the queue backing @file to @tun (rtnl held).
 * Validates permissions and queue limits, optionally re-attaches a
 * persisted socket filter, sets up the tx ring, and publishes the
 * queue in tun->tfiles.  Returns 0 on success or a negative errno.
 *
 * Fix (review): after sk_attach_filter() the code read
 * `if (!err) goto out;`, i.e. it bailed out when the filter attach
 * *succeeded* — returning 0 with the queue never published (tfile->tun
 * unset, numqueues not incremented) — while a filter *failure* fell
 * through, completed the attach, and still returned the negative err
 * from the `out:` label.  Both branches are wrong by inspection of
 * this function's own epilogue; bail out only on error. */
static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persist device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err)
			goto out;
	}

	if (!tfile->detached &&
	    skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached)
		tun_enable_queue(tfile);
	else
		sock_hold(&tfile->sk);

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}
/* Resolve the device a queue file is attached to, taking a dev_hold()
 * reference.  Returns NULL if the file is not attached; pair a
 * non-NULL result with tun_put(). */
static struct tun_struct *__tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}
/* Convenience wrapper: resolve the device from a struct file. */
static struct tun_struct *tun_get(struct file *file)
{
	struct tun_file *tfile = file->private_data;

	return __tun_get(tfile);
}
/* Drop the device reference taken by __tun_get()/tun_get(). */
static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}
/* TAP filtering */
/* Set the 64-bit-bloom bit corresponding to a MAC address (top six
 * CRC bits select the bit position). */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int bit = ether_crc(ETH_ALEN, addr) >> 26;

	mask[bit >> 5] |= (1 << (bit & 31));
}
/* Test the hash bit for a MAC address; nonzero means "possibly in the
 * filter" (same bit-selection as addr_hash_set). */
static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int bit = ether_crc(ETH_ALEN, addr) >> 26;

	return mask[bit >> 5] & (1 << (bit & 31));
}
/* TUNSETTXFILTER: rebuild the MAC filter from a user-supplied
 * struct tun_filter followed by uf.count addresses.  The first
 * FLT_EXACT_COUNT addresses go in the exact table; the remainder must
 * be multicast and are hashed.  Returns the number of exact entries,
 * 0 to disable filtering, or a negative errno. */
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;

free_addr:
	kfree(addr);
	return err;
}
/* Returns: 0 - drop, !=0 - accept */
/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match against the programmed address table. */
	for (i = 0; i < filter->count; i++) {
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;
	}

	/* Inexact (hashed) match applies to multicast only. */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}
/*
* Checks whether the packet is accepted or not.
* Returns: 0 - drop, !=0 - accept
*/
/* Apply the MAC filter to a packet; a disabled (empty) filter accepts
 * everything.  Returns 0 to drop, nonzero to accept. */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (filter->count == 0)
		return 1;

	return run_filter(filter, skb);
}
/* Network device part of the driver */
static const struct ethtool_ops tun_ethtool_ops;
/* Net device detach from fd. */
/* ndo_uninit: tear every queue off the device as it goes away. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}
/* Net device open. */
/* ndo_open: start all tx queues and poke each queue's socket so any
 * waiting writers re-evaluate writability. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int qidx;

	netif_tx_start_all_queues(dev);

	for (qidx = 0; qidx < tun->numqueues; qidx++) {
		struct tun_file *tfile = rtnl_dereference(tun->tfiles[qidx]);

		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}
/* Net device close. */
/* ndo_stop: quiesce all tx queues. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}
/* Net device start xmit */
/* ndo_start_xmit: hand a packet from the stack to the user-space
 * reader of the selected queue via its tx ring, applying the MAC and
 * socket filters on the way.  Never returns busy: on any failure the
 * packet is dropped and counted. */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	u32 numqueues = 0;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);
	numqueues = ACCESS_ONCE(tun->numqueues);

	/* Drop packet if interface is not attached */
	if (txq >= numqueues)
		goto drop;

#ifdef CONFIG_RPS
	if (numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		if (rxhash) {
			struct tun_flow_entry *e;
			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}
#endif

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	/* Full ring counts as a drop, matching qdisc-less semantics. */
	if (skb_array_produce(&tfile->tx_array, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}
/* ndo_set_rx_mode: intentionally empty — see comment below. */
static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}
/* ndo_fix_features: user-toggleable offload bits are limited to what
 * was enabled via TUNSETOFFLOAD; everything outside TUN_USER_FEATURES
 * passes through untouched. */
static netdev_features_t tun_net_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);
	netdev_features_t user_bits = features & tun->set_features;
	netdev_features_t other_bits = features & ~TUN_USER_FEATURES;

	return user_bits | other_bits;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: deliberately empty — see comment body for why. */
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * Since both of those are synchronous operations, we are guaranteed
	 * never to have pending data when we poll for it
	 * so there is nothing to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole
	 */
	return;
}
#endif
/* .ndo_set_rx_headroom: record the requested headroom, clamped so it never
 * drops below the default NET_SKB_PAD padding.
 */
static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->align = (new_hr < NET_SKB_PAD) ? NET_SKB_PAD : new_hr;
}
/* .ndo_get_stats64: sum the per-CPU statistics into @stats.
 * The u64 packet/byte counters are read under the u64_stats seqcount so
 * a 32-bit reader never sees a torn value; the u32 drop/error counters
 * are read without synchronization (a slightly stale value is acceptable).
 */
static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;
	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;
		p = per_cpu_ptr(tun->pcpu_stats, i);
		/* Retry the snapshot if a writer updated it concurrently. */
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets	= p->rx_packets;
			rxbytes		= p->rx_bytes;
			txpackets	= p->tx_packets;
			txbytes		= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));
		stats->rx_packets	+= rxpackets;
		stats->rx_bytes		+= rxbytes;
		stats->tx_packets	+= txpackets;
		stats->tx_bytes		+= txbytes;
		/* u32 counters */
		rx_dropped	+= p->rx_dropped;
		rx_frame_errors	+= p->rx_frame_errors;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped  = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}
/* Install @prog as the device's XDP program (NULL detaches).
 * Called with RTNL held; publishes the new program before releasing the
 * reference on the one it replaces.
 */
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *old;

	old = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	return 0;
}
/* Report the id of the attached XDP program, or 0 when none is attached.
 * Called with RTNL held.
 */
static u32 tun_xdp_query(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	const struct bpf_prog *prog = rtnl_dereference(tun->xdp_prog);

	return prog ? prog->aux->id : 0;
}
/* .ndo_xdp entry point: dispatch setup/query commands to their helpers. */
static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	if (xdp->command == XDP_SETUP_PROG)
		return tun_xdp_set(dev, xdp->prog, xdp->extack);

	if (xdp->command == XDP_QUERY_PROG) {
		xdp->prog_id = tun_xdp_query(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	}

	return -EINVAL;
}
/* net_device_ops for IFF_TUN (point-to-point, L3) devices. */
static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
};
/* net_device_ops for IFF_TAP (Ethernet, L2) devices: adds MAC address
 * handling, rx-mode and XDP support on top of the tun ops.
 */
static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_xdp		= tun_xdp,
};
/* Initialize the per-device flow table and arm the periodic garbage
 * collection timer that expires stale flow entries.
 */
static void tun_flow_init(struct tun_struct *tun)
{
	int i;
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);
	tun->ageing_time = TUN_FLOW_EXPIRE;
	setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}
/* Tear down the flow table: stop the GC timer, then drop all entries. */
static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}
#define MIN_MTU 68
#define MAX_MTU 65535
/* Initialize net device. */
/* Set up link-layer properties according to the device mode:
 * IFF_TUN gets a headerless point-to-point L3 device, IFF_TAP a full
 * Ethernet device with a random MAC address.
 */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;
		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;
		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;
	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
		eth_hw_addr_random(dev);
		break;
	}
	dev->min_mtu = MIN_MTU;
	/* Leave room for the link-layer header within the MTU bound. */
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}
/* Character device part */
/* Poll */
/* poll() on the tun character device: readable when the tx ring holds
 * packets, writable when the socket has sndbuf space (with the usual
 * SOCKWQ_ASYNC_NOSPACE re-check to avoid a lost-wakeup race).
 */
static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	struct sock *sk;
	unsigned int mask = 0;
	if (!tun)
		return POLLERR;
	sk = tfile->socket.sk;
	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
	poll_wait(file, sk_sleep(sk), wait);
	if (!skb_array_empty(&tfile->tx_array))
		mask |= POLLIN | POLLRDNORM;
	if (tun->dev->flags & IFF_UP &&
	    (sock_writeable(sk) ||
	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	      sock_writeable(sk))))
		mask |= POLLOUT | POLLWRNORM;
	/* Device is being torn down: report error to wake pollers. */
	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = POLLERR;
	tun_put(tun);
	return mask;
}
/* prepad is the amount to reserve at front. len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
/* Allocate an skb for the receive path, honoring the socket sndbuf limit
 * (may block unless @noblock).  Returns the skb or an ERR_PTR().
 */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;
	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;
	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, 0);
	if (!skb)
		return ERR_PTR(err);
	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	/* The non-linear remainder lives in page fragments. */
	skb->data_len = len - linear;
	skb->len += len - linear;
	return skb;
}
/* Deliver a received skb to the stack, optionally batching: while the
 * sender signals MSG_MORE, skbs are parked on the socket write queue and
 * flushed together once the batch fills (tun->rx_batched) or MSG_MORE is
 * cleared.  Batching is skipped entirely when disabled or the queue is
 * already empty on a non-MORE packet.
 */
static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;
	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}
	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		/* Batch is complete: splice it off under the lock, deliver
		 * outside the lock below.
		 */
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);
	if (rcv) {
		struct sk_buff *nskb;
		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue)))
			netif_receive_skb(nskb);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}
/* Decide whether the fast tun_build_skb() path may be used: TAP mode only,
 * unlimited socket sndbuf, non-blocking, non-zerocopy, and the padded
 * packet plus skb_shared_info must fit in a single page.
 */
static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	size_t need;

	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP ||
	    tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock || zerocopy)
		return false;

	need = SKB_DATA_ALIGN(len + TUN_RX_PAD) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	return need <= PAGE_SIZE;
}
static struct sk_buff *tun_build_skb(struct tun_struct *tun,
struct tun_file *tfile,
struct iov_iter *from,
struct virtio_net_hdr *hdr,
int len, int *skb_xdp)
{
struct page_frag *alloc_frag = ¤t->task_frag;
struct sk_buff *skb;
struct bpf_prog *xdp_prog;
int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
unsigned int delta = 0;
char *buf;
size_t copied;
bool xdp_xmit = false;
int err, pad = TUN_RX_PAD;
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog)
pad += TUN_HEADROOM;
buflen += SKB_DATA_ALIGN(len + pad);
rcu_read_unlock();
if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
return ERR_PTR(-ENOMEM);
buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
copied = copy_page_from_iter(alloc_frag->page,
alloc_frag->offset + pad,
len, from);
if (copied != len)
return ERR_PTR(-EFAULT);
/* There's a small window that XDP may be set after the check
* of xdp_prog above, this should be rare and for simplicity
* we do XDP on skb in case the headroom is not enough.
*/
if (hdr->gso_type || !xdp_prog)
*skb_xdp = 1;
else
*skb_xdp = 0;
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog && !*skb_xdp) {
struct xdp_buff xdp;
void *orig_data;
u32 act;
xdp.data_hard_start = buf;
xdp.data = buf + pad;
xdp.data_end = xdp.data + len;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
switch (act) {
case XDP_REDIRECT:
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
if (err)
goto err_redirect;
return NULL;
case XDP_TX:
xdp_xmit = true;
/* fall through */
case XDP_PASS:
delta = orig_data - xdp.data;
break;
default:
bpf_warn_invalid_xdp_action(act);
/* fall through */
case XDP_ABORTED:
trace_xdp_exception(tun->dev, xdp_prog, act);
/* fall through */
case XDP_DROP:
goto err_xdp;
}
}
skb = build_skb(buf, buflen);
if (!skb) {
rcu_read_unlock();
return ERR_PTR(-ENOMEM);
}
skb_reserve(skb, pad - delta);
skb_put(skb, len + delta);
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
if (xdp_xmit) {
skb->dev = tun->dev;
generic_xdp_tx(skb, xdp_prog);
rcu_read_lock();
return NULL;
}
rcu_read_unlock();
return skb;
err_redirect:
put_page(alloc_frag->page);
err_xdp:
rcu_read_unlock();
this_cpu_inc(tun->pcpu_stats->rx_dropped);
return NULL;
}
/* Get packet from user space buffer */
/* Parse the optional tun_pi and virtio_net headers, build an skb (fast
 * page-frag path, zerocopy, or plain copy), classify the protocol and
 * deliver the packet into the network stack.  Returns the number of bytes
 * consumed from @from or a negative errno.
 */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	struct tun_pcpu_stats *stats;
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash;
	int skb_xdp = 1;
	if (!(tun->dev->flags & IFF_UP))
		return -EIO;
	/* Without IFF_NO_PI each packet is prefixed by a struct tun_pi. */
	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);
		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}
	/* With IFF_VNET_HDR a virtio_net_hdr follows; validate that the
	 * checksum region and claimed header length fit in the packet.
	 */
	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;
		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;
		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		/* Skip any padding the configured header size implies. */
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}
	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}
	good_linear = SKB_MAX_HEAD(align);
	if (msg_control) {
		struct iov_iter i = *from;
		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		/* Zerocopy only if the remainder fits in MAX_SKB_FRAGS. */
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}
	if (tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* For the packet that is not easy to be processed
		 * (e.g gso or jumbo packet), we will do it at after
		 * skb was created with generic XDP routine.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		if (IS_ERR(skb)) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			return PTR_ERR(skb);
		}
		/* NULL means XDP already consumed (tx/redirect/drop). */
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}
		skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) != -EAGAIN)
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
			return PTR_ERR(skb);
		}
		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);
		if (err) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			kfree_skb(skb);
			return -EFAULT;
		}
	}
	/* Apply the virtio header (csum/gso metadata) to the skb. */
	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
		kfree_skb(skb);
		return -EINVAL;
	}
	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			/* No PI header: infer IPv4/IPv6 from the version
			 * nibble; an empty skb yields 0 and is rejected.
			 */
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
				kfree_skb(skb);
				return -EINVAL;
			}
		}
		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (msg_control) {
		/* Zerocopy was requested but not used: complete it now. */
		struct ubuf_info *uarg = msg_control;
		uarg->callback(uarg, false);
	}
	skb_reset_network_header(skb);
	skb_probe_transport_header(skb, 0);
	/* Run XDP on the skb when the fast path could not (see skb_xdp). */
	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;
		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				return total_len;
			}
		}
		rcu_read_unlock();
	}
	rxhash = __skb_get_hash_symmetric(skb);
#ifndef CONFIG_4KSTACKS
	tun_rx_batched(tun, tfile, skb, more);
#else
	netif_rx_ni(skb);
#endif
	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(stats);
	/* Record the flow so tx queue selection can follow this hash. */
	tun_flow_update(tun, rxhash, tfile);
	return total_len;
}
/* write() on the tun character device: inject one packet into the stack. */
static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(file);
	ssize_t n;

	if (!tun)
		return -EBADFD;

	n = tun_get_user(tun, tfile, NULL, from,
			 file->f_flags & O_NONBLOCK, false);

	tun_put(tun);
	return n;
}
/* Put packet to the user space buffer */
/* Serialize an skb to userspace: optional tun_pi header, optional
 * virtio_net header, then the frame itself (re-inserting a VLAN tag if
 * the skb carries one).  Returns the full on-wire length or -errno.
 */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	struct tun_pcpu_stats *stats;
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;
	if (skb_vlan_tag_present(skb))
		vlan_hlen = VLAN_HLEN;
	if (tun->flags & IFF_VNET_HDR)
		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
	total = skb->len + vlan_hlen + vnet_hdr_sz;
	if (!(tun->flags & IFF_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;
		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
			/* Packet will be striped */
			pi.flags |= TUN_PKT_STRIP;
		}
		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
			return -EFAULT;
	}
	if (vnet_hdr_sz) {
		struct virtio_net_hdr gso;
		if (iov_iter_count(iter) < vnet_hdr_sz)
			return -EINVAL;
		if (virtio_net_hdr_from_skb(skb, &gso,
					    tun_is_little_endian(tun), true)) {
			/* Should never happen: dump diagnostics and bail. */
			struct skb_shared_info *sinfo = skb_shinfo(skb);
			pr_err("unexpected GSO type: "
			       "0x%x, gso_size %d, hdr_len %d\n",
			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
			       tun16_to_cpu(tun, gso.hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
			return -EFAULT;
		/* Pad out to the configured vnet header size. */
		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}
	if (vlan_hlen) {
		int ret;
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
		/* Copy the MAC addresses, splice the VLAN tag back in, then
		 * let the tail copy below emit the rest of the frame.
		 */
		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;
		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}
	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
done:
	/* caller is in process context,  */
	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += skb->len + vlan_hlen;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);
	return total;
}
/* Pop one skb from the tx ring, blocking (interruptibly) when empty unless
 * @noblock.  On return *err holds 0, -EAGAIN, -ERESTARTSYS or -EFAULT
 * (socket shut down); the skb is NULL on error.
 *
 * Fix: use set_current_state()/__set_current_state() instead of writing
 * current->state directly, and re-arm TASK_INTERRUPTIBLE *inside* the loop
 * before re-checking the ring.  The direct write lacks the memory barrier
 * that orders the state change against the producer's wakeup check, and
 * the old code never restored TASK_INTERRUPTIBLE after schedule() returned,
 * so subsequent schedule() calls could busy-spin instead of sleeping.
 */
static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
				     int *err)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sk_buff *skb = NULL;
	int error = 0;

	skb = skb_array_consume(&tfile->tx_array);
	if (skb)
		goto out;
	if (noblock) {
		error = -EAGAIN;
		goto out;
	}

	add_wait_queue(&tfile->wq.wait, &wait);

	while (1) {
		/* Must be set before re-checking the ring so a concurrent
		 * producer's wake_up() cannot be missed.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_array_consume(&tfile->tx_array);
		if (skb)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;
			break;
		}
		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
			error = -EFAULT;
			break;
		}
		schedule();
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tfile->wq.wait, &wait);

out:
	*err = error;
	return skb;
}
/* Copy one packet to userspace.  @skb may be supplied by the caller
 * (recvmsg's msg_control); otherwise one is pulled from the tx ring.
 * Returns bytes written, 0 for a zero-length read, or -errno.
 *
 * Fix: a caller-supplied skb was leaked when the destination iterator was
 * empty — it must be freed before the early return (upstream
 * "tun: free skb in early errors").
 */
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct iov_iter *to,
			   int noblock, struct sk_buff *skb)
{
	ssize_t ret;
	int err;

	tun_debug(KERN_INFO, tun, "tun_do_read\n");

	if (!iov_iter_count(to)) {
		if (skb)
			kfree_skb(skb);
		return 0;
	}

	if (!skb) {
		/* Read frames from ring */
		skb = tun_ring_recv(tfile, noblock, &err);
		if (!skb)
			return err;
	}

	ret = tun_put_user(tun, tfile, skb, to);
	if (unlikely(ret < 0))
		kfree_skb(skb);
	else
		consume_skb(skb);

	return ret;
}
/* read() on the tun character device: hand one packet to userspace,
 * never reporting more bytes than the caller's buffer could hold.
 */
static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	ssize_t want = iov_iter_count(to);
	ssize_t done;

	if (!tun)
		return -EBADFD;

	done = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
	if (done > want)
		done = want;
	if (done > 0)
		iocb->ki_pos = done;

	tun_put(tun);
	return done;
}
/* priv_destructor: release per-device state before the netdev itself is
 * freed.  All queues must already be detached (disabled list empty).
 */
static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	BUG_ON(!(list_empty(&tun->disabled)));
	free_percpu(tun->pcpu_stats);
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
}
/* netdev setup callback: default ownership (anyone), ethtool ops and
 * destructor wiring for a freshly allocated tun/tap device.
 */
static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;
	dev->ethtool_ops = &tun_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = tun_free_netdev;
	/* We prefer our own queue length */
	dev->tx_queue_len = TUN_READQ_SIZE;
}
/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
/* Reject all netlink-based creation/changes; devices are made via ioctl. */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	return -EINVAL;
}
/* rtnl_link_ops: enables "ip link del" on tun/tap devices; creation via
 * netlink is blocked by tun_validate() above.
 */
static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
};
/* sk_write_space callback: wake poll()ers and fasync listeners once the
 * socket becomes writeable again (only if someone armed NOSPACE).
 */
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;
	if (!sock_writeable(sk))
		return;
	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;
	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
						POLLWRNORM | POLLWRBAND);
	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
/* Socket sendmsg: inject the message payload into the device's receive
 * path (used by vhost-net).  msg_control may carry zerocopy state.
 */
static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);
	int err;

	if (!tun)
		return -EBADFD;

	err = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
			   m->msg_flags & MSG_DONTWAIT,
			   m->msg_flags & MSG_MORE);

	tun_put(tun);
	return err;
}
/* Socket recvmsg: deliver one packet to the caller (used by vhost-net).
 * m->msg_control may carry an skb that was already dequeued (peeked) by
 * the caller; tun_do_read() then consumes it instead of the ring.
 *
 * Fix: on the early error paths (device gone, invalid flags) that
 * caller-supplied skb was leaked — free it before returning (upstream
 * "tun: free skb in early errors").
 */
static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);
	int ret;

	if (!tun) {
		ret = -EBADFD;
		goto out_free_skb;
	}

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out_put_tun;
	}
	if (flags & MSG_ERRQUEUE) {
		ret = sock_recv_errqueue(sock->sk, m, total_len,
					 SOL_PACKET, TUN_TX_TIMESTAMP);
		goto out;
	}
	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT,
			  m->msg_control);
	if (ret > (ssize_t)total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;

out_put_tun:
	tun_put(tun);
out_free_skb:
	if (m->msg_control)
		kfree_skb(m->msg_control);
	return ret;
}
/* Socket peek_len: report the length of the next queued packet without
 * dequeuing it; 0 when empty or the device is gone.
 */
static int tun_peek_len(struct socket *sock)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);
	int len = 0;

	if (tun) {
		len = skb_array_peek_len(&tfile->tx_array);
		tun_put(tun);
	}
	return len;
}
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.peek_len = tun_peek_len,
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
};

/* Minimal protocol definition backing the per-queue tun_file socket. */
static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_file),
};
/* Return only the flag bits that are meaningful to userspace. */
static int tun_flags(struct tun_struct *tun)
{
	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}
/* sysfs "tun_flags" attribute: hex dump of the user-visible flags. */
static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return sprintf(buf, "0x%x\n", tun_flags(tun));
}
/* sysfs "owner" attribute: owning uid mapped into the reader's user
 * namespace, or -1 when no owner is set.
 */
static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	if (!uid_valid(tun->owner))
		return sprintf(buf, "-1\n");
	return sprintf(buf, "%u\n",
		       from_kuid_munged(current_user_ns(), tun->owner));
}
/* sysfs "group" attribute: owning gid mapped into the reader's user
 * namespace, or -1 when no group is set.
 */
static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	if (!gid_valid(tun->group))
		return sprintf(buf, "-1\n");
	return sprintf(buf, "%u\n",
		       from_kgid_munged(current_user_ns(), tun->group));
}
/* Read-only sysfs attributes exposed under the net device's directory. */
static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
static struct attribute *tun_dev_attrs[] = {
	&dev_attr_tun_flags.attr,
	&dev_attr_owner.attr,
	&dev_attr_group.attr,
	NULL
};
static const struct attribute_group tun_attr_group = {
	.attrs = tun_dev_attrs
};
/* TUNSETIFF handler: attach the caller's tun_file to an existing device
 * of matching type, or create and register a new tun/tap netdev.
 * Called with RTNL held.  Returns 0 with ifr->ifr_name filled in, or a
 * negative errno (new-device failures unwind via the labels at the end).
 */
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;
	/* A detached queue must be re-attached via TUNSETQUEUE instead. */
	if (tfile->detached)
		return -EINVAL;
	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		/* The named device must really be one of ours and of the
		 * requested type (identified by its netdev_ops).
		 */
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;
		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & IFF_MULTI_QUEUE))
			return -EINVAL;
		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;
		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
		if (err < 0)
			return err;
		if (tun->flags & IFF_MULTI_QUEUE &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queue has already been attached, no need
			 * to initialize the device again.
			 */
			return 0;
		}
	}
	else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;
		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= IFF_TUN;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= IFF_TAP;
			name = "tap%d";
		} else
			return -EINVAL;
		if (*ifr->ifr_name)
			name = ifr->ifr_name;
		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);
		if (!dev)
			return -ENOMEM;
		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;
		dev->sysfs_groups[0] = &tun_attr_group;
		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
		tun->align = NET_SKB_PAD;
		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
		tun->rx_batched = 0;
		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
		if (!tun->pcpu_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}
		spin_lock_init(&tun->lock);
		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_stat;
		tun_net_init(dev);
		tun_flow_init(tun);
		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_STAG_TX;
		dev->features = dev->hw_features | NETIF_F_LLTX;
		dev->vlan_features = dev->features &
				     ~(NETIF_F_HW_VLAN_CTAG_TX |
				       NETIF_F_HW_VLAN_STAG_TX);
		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file, false);
		if (err < 0)
			goto err_free_flow;
		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_detach;
	}
	netif_carrier_on(tun->dev);
	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
	/* Apply the user-settable feature flags on both paths. */
	tun->flags = (tun->flags & ~TUN_FEATURES) |
		(ifr->ifr_flags & TUN_FEATURES);
	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);
	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;
err_detach:
	tun_detach_all(dev);
	/* register_netdevice() already called tun_free_netdev() */
	goto err_free_dev;
err_free_flow:
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
err_free_stat:
	free_percpu(tun->pcpu_stats);
err_free_dev:
	free_netdev(dev);
	return err;
}
/* TUNGETIFF helper: report the device name and user-visible flags. */
static void tun_get_iff(struct net *net, struct tun_struct *tun,
			struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");
	strcpy(ifr->ifr_name, tun->dev->name);
	ifr->ifr_flags = tun_flags(tun);
}
/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
/* Translate TUN_F_* offload requests into NETIF_F_* features and apply
 * them.  Any bit left over is unknown and rejected with -EINVAL, which
 * lets userspace probe for supported features.
 */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;
	unsigned long rem = arg;

	if (rem & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		rem &= ~TUN_F_CSUM;

		/* TSO is only honoured together with checksum offload. */
		if (rem & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (rem & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				rem &= ~TUN_F_TSO_ECN;
			}
			if (rem & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (rem & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			rem &= ~(TUN_F_TSO4 | TUN_F_TSO6);
		}
	}

	if (rem)
		return -EINVAL;

	tun->set_features = features;
	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
	tun->dev->wanted_features |= features;
	netdev_update_features(tun->dev);
	return 0;
}
/* Detach the socket filter from the first @n queues (all of them, or the
 * prefix that tun_attach_filter() managed before failing).
 * Called with RTNL held (tfiles[] dereferenced via rtnl_dereference).
 */
static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		sk_detach_filter(tfile->socket.sk);
		release_sock(tfile->socket.sk);
	}
	tun->filter_attached = false;
}
/* Attach tun->fprog to the socket of every active queue; on failure the
 * queues already processed are rolled back.  Called with RTNL held.
 */
static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;
	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
			/* Undo the filters attached so far. */
			tun_detach_filter(tun, i);
			return ret;
		}
	}
	tun->filter_attached = true;
	return ret;
}
/* Propagate tun->sndbuf to the socket of every attached queue.
 * Called with RTNL held (tfiles[] dereferenced via rtnl_dereference).
 */
static void tun_set_sndbuf(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		struct tun_file *tfile = rtnl_dereference(tun->tfiles[i]);

		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}
/* TUNSETQUEUE handler: re-attach a previously detached queue or detach an
 * active one from a multi-queue device.  Takes RTNL itself.
 */
static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;
	rtnl_lock();
	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		/* Only a queue that was detached earlier can re-attach. */
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		/* Detaching requires an attached queue on an MQ device. */
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;
unlock:
	rtnl_unlock();
	return ret;
}
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, int ifreq_len)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
struct ifreq ifr;
kuid_t owner;
kgid_t group;
int sndbuf;
int vnet_hdr_sz;
unsigned int ifindex;
int le;
int ret;
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
} else {
memset(&ifr, 0, sizeof(ifr));
}
if (cmd == TUNGETFEATURES) {
/* Currently this just means: "what IFF flags are valid?".
* This is needed because we never checked for invalid flags on
* TUNSETIFF.
*/
return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
(unsigned int __user*)argp);
} else if (cmd == TUNSETQUEUE)
return tun_set_queue(file, &ifr);
ret = 0;
rtnl_lock();
tun = __tun_get(tfile);
if (cmd == TUNSETIFF) {
ret = -EEXIST;
if (tun)
goto unlock;
ifr.ifr_name[IFNAMSIZ-1] = '\0';
ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
if (ret)
goto unlock;
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
goto unlock;
}
if (cmd == TUNSETIFINDEX) {
ret = -EPERM;
if (tun)
goto unlock;
ret = -EFAULT;
if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
goto unlock;
ret = 0;
tfile->ifindex = ifindex;
goto unlock;
}
ret = -EBADFD;
if (!tun)
goto unlock;
tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
ret = 0;
switch (cmd) {
case TUNGETIFF:
tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
if (tfile->detached)
ifr.ifr_flags |= IFF_DETACH_QUEUE;
if (!tfile->socket.sk->sk_filter)
ifr.ifr_flags |= IFF_NOFILTER;
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
break;
case TUNSETNOCSUM:
/* Disable/Enable checksum */
/* [unimplemented] */
tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
arg ? "disabled" : "enabled");
break;
case TUNSETPERSIST:
/* Disable/Enable persist mode. Keep an extra reference to the
* module to prevent the module being unprobed.
*/
if (arg && !(tun->flags & IFF_PERSIST)) {
tun->flags |= IFF_PERSIST;
__module_get(THIS_MODULE);
}
if (!arg && (tun->flags & IFF_PERSIST)) {
tun->flags &= ~IFF_PERSIST;
module_put(THIS_MODULE);
}
tun_debug(KERN_INFO, tun, "persist %s\n",
arg ? "enabled" : "disabled");
break;
case TUNSETOWNER:
/* Set owner of the device */
owner = make_kuid(current_user_ns(), arg);
if (!uid_valid(owner)) {
ret = -EINVAL;
break;
}
tun->owner = owner;
tun_debug(KERN_INFO, tun, "owner set to %u\n",
from_kuid(&init_user_ns, tun->owner));
break;
case TUNSETGROUP:
/* Set group of the device */
group = make_kgid(current_user_ns(), arg);
if (!gid_valid(group)) {
ret = -EINVAL;
break;
}
tun->group = group;
tun_debug(KERN_INFO, tun, "group set to %u\n",
from_kgid(&init_user_ns, tun->group));
break;
case TUNSETLINK:
/* Only allow setting the type when the interface is down */
if (tun->dev->flags & IFF_UP) {
tun_debug(KERN_INFO, tun,
"Linktype set failed because interface is up\n");
ret = -EBUSY;
} else {
tun->dev->type = (int) arg;
tun_debug(KERN_INFO, tun, "linktype set to %d\n",
tun->dev->type);
ret = 0;
}
break;
#ifdef TUN_DEBUG
case TUNSETDEBUG:
tun->debug = arg;
break;
#endif
case TUNSETOFFLOAD:
ret = set_offload(tun, arg);
break;
case TUNSETTXFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = update_filter(&tun->txflt, (void __user *)arg);
break;
case SIOCGIFHWADDR:
/* Get hw address */
memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
ifr.ifr_hwaddr.sa_family = tun->dev->type;
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
break;
case SIOCSIFHWADDR:
/* Set hw address */
tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
ifr.ifr_hwaddr.sa_data);
ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
break;
case TUNGETSNDBUF:
sndbuf = tfile->socket.sk->sk_sndbuf;
if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
ret = -EFAULT;
break;
case TUNSETSNDBUF:
if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
ret = -EFAULT;
break;
}
tun->sndbuf = sndbuf;
tun_set_sndbuf(tun);
break;
case TUNGETVNETHDRSZ:
vnet_hdr_sz = tun->vnet_hdr_sz;
if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
ret = -EFAULT;
break;
case TUNSETVNETHDRSZ:
if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
ret = -EFAULT;
break;
}
if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
ret = -EINVAL;
break;
}
tun->vnet_hdr_sz = vnet_hdr_sz;
break;
case TUNGETVNETLE:
le = !!(tun->flags & TUN_VNET_LE);
if (put_user(le, (int __user *)argp))
ret = -EFAULT;
break;
case TUNSETVNETLE:
if (get_user(le, (int __user *)argp)) {
ret = -EFAULT;
break;
}
if (le)
tun->flags |= TUN_VNET_LE;
else
tun->flags &= ~TUN_VNET_LE;
break;
case TUNGETVNETBE:
ret = tun_get_vnet_be(tun, argp);
break;
case TUNSETVNETBE:
ret = tun_set_vnet_be(tun, argp);
break;
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = -EFAULT;
if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
break;
ret = tun_attach_filter(tun);
break;
case TUNDETACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = 0;
tun_detach_filter(tun, tun->numqueues);
break;
case TUNGETFILTER:
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = -EFAULT;
if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
break;
ret = 0;
break;
default:
ret = -EINVAL;
break;
}
unlock:
rtnl_unlock();
if (tun)
tun_put(tun);
return ret;
}
/* Native ioctl entry point: forward to the common handler, telling it
 * to copy a full native "struct ifreq" from userspace. */
static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	long ret;

	ret = __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
	return ret;
}
#ifdef CONFIG_COMPAT
/* 32-bit-compat ioctl entry point.
 *
 * Commands that carry a user pointer must have the argument translated
 * with compat_ptr(); everything else is truncated to the 32-bit value
 * the caller actually passed.
 *
 * struct compat_ifreq is shorter than the native struct ifreq, so the
 * common handler is told the smaller size and never reads beyond the
 * end of the user buffer. The fields this driver uses have compatible
 * layout in both structures, so no field-wise conversion is needed.
 */
static long tun_chr_compat_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	if (cmd == TUNSETIFF || cmd == TUNGETIFF || cmd == TUNSETTXFILTER ||
	    cmd == TUNGETSNDBUF || cmd == TUNSETSNDBUF ||
	    cmd == SIOCGIFHWADDR || cmd == SIOCSIFHWADDR)
		arg = (unsigned long)compat_ptr(arg);
	else
		arg = (compat_ulong_t)arg;

	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */
/* fasync handler: (un)register the fd for SIGIO delivery and remember
 * the state in tfile->flags. */
static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int err = fasync_helper(fd, file, on, &tfile->fasync);

	if (err < 0)
		return err;

	if (on) {
		/* deliver the signal to the calling task only */
		__f_setown(file, task_pid(current), PIDTYPE_PID, 0);
		tfile->flags |= TUN_FASYNC;
	} else {
		tfile->flags &= ~TUN_FASYNC;
	}
	return 0;
}
/* open() handler for /dev/net/tun: allocate the per-fd tun_file (which
 * embeds a socket) and leave it unattached; a later TUNSETIFF ioctl
 * binds it to an actual device. */
static int tun_chr_open(struct inode *inode, struct file * file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;
	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
	/* tun_file embeds the sock allocated here, hence the cast */
	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	/* not attached to any tun device yet */
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;
	init_waitqueue_head(&tfile->wq.wait);
	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);
	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;
	sock_init_data(&tfile->socket, &tfile->sk);
	tfile->sk.sk_write_space = tun_sock_write_space;
	/* effectively unlimited until TUNSETSNDBUF says otherwise */
	tfile->sk.sk_sndbuf = INT_MAX;
	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);
	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
	return 0;
}
/* release() handler: detach the per-fd state from its device (if any)
 * and drop it. */
static int tun_chr_close(struct inode *inode, struct file *file)
{
	tun_detach(file->private_data, true);
	return 0;
}
#ifdef CONFIG_PROC_FS
/* /proc/<pid>/fdinfo hook: report the name of the interface this fd is
 * attached to (empty string when unattached, thanks to the memset). */
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct tun_struct *tun;
	struct ifreq ifr;
	memset(&ifr, 0, sizeof(ifr));
	/* tun_get_iff() requires RTNL */
	rtnl_lock();
	tun = tun_get(f);
	if (tun)
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif
/* File operations for the /dev/net/tun character device. */
static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};
/* Misc character device registration: appears as /dev/net/tun. */
static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};
/* ethtool interface */

/* ethtool get_link_ksettings: tun has no real PHY, so advertise a
 * fixed fictitious 10Mb/s full-duplex TP link with autoneg off. */
static int tun_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	cmd->base.autoneg	= AUTONEG_DISABLE;
	cmd->base.phy_address	= 0;
	cmd->base.port		= PORT_TP;
	cmd->base.duplex	= DUPLEX_FULL;
	cmd->base.speed		= SPEED_10;
	return 0;
}
/* ethtool get_drvinfo: report driver name/version, and whether this is
 * a tun (L3) or tap (L2) instance via the bus_info field. */
static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);
	int mode = tun->flags & TUN_TYPE_MASK;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	if (mode == IFF_TUN)
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
	else if (mode == IFF_TAP)
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
}
/* ethtool get_msglevel: expose the per-device debug mask on TUN_DEBUG
 * builds, otherwise report unsupported.
 * NOTE(review): -EOPNOTSUPP is funnelled through the u32 return type
 * here; callers appear to tolerate this -- confirm before changing. */
static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}
/* ethtool set_msglevel: store the debug mask on TUN_DEBUG builds;
 * silently ignored otherwise. */
static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	tun->debug = value;
#endif
}
/* ethtool get_coalesce: only rx_max_coalesced_frames is meaningful;
 * it mirrors the driver's rx batching budget. */
static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *priv = netdev_priv(dev);

	ec->rx_max_coalesced_frames = priv->rx_batched;
	return 0;
}
static int tun_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *ec)
{
struct tun_struct *tun = netdev_priv(dev);
if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
tun->rx_batched = NAPI_POLL_WEIGHT;
else
tun->rx_batched = ec->rx_max_coalesced_frames;
return 0;
}
/* ethtool operations supported by tun/tap devices. */
static const struct ethtool_ops tun_ethtool_ops = {
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_coalesce   = tun_get_coalesce,
	.set_coalesce   = tun_set_coalesce,
	.get_link_ksettings = tun_get_link_ksettings,
};
/* Resize every queue's skb_array (attached and disabled queues alike)
 * to match the device's new tx_queue_len. Called under RTNL from the
 * netdevice notifier. Returns 0, -ENOMEM, or the resize error. */
static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct skb_array **arrays;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;
	arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
	if (!arrays)
		return -ENOMEM;
	/* collect the arrays of the attached queues... */
	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		arrays[i] = &tfile->tx_array;
	}
	/* ...and of the detached-but-kept queues (i keeps counting) */
	list_for_each_entry(tfile, &tun->disabled, next)
		arrays[i++] = &tfile->tx_array;
	/* resize all in one call so failure can be unwound atomically */
	ret = skb_array_resize_multiple(arrays, n,
					dev->tx_queue_len, GFP_KERNEL);
	kfree(arrays);
	return ret;
}
/* Netdevice notifier callback: when a tun device's tx_queue_len
 * changes, resize the per-queue skb arrays to match. */
static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	/* netdev_priv() is only pointer arithmetic; tun is dereferenced
	 * solely after the rtnl_link_ops check below confirms this is
	 * really a tun/tap device */
	struct tun_struct *tun = netdev_priv(dev);
	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;
	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}
/* Notifier block registered with the netdevice notifier chain. */
static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};
/* Module init: register the rtnl link ops, the misc character device
 * and the netdevice notifier, unwinding in reverse order on failure. */
static int __init tun_init(void)
{
	int ret = 0;
	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}
	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}
	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}
	return 0;
	/* error unwinding, in reverse order of registration */
err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}
/* Module exit: tear down everything tun_init() registered. */
static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}
/* Get an underlying socket object from tun file. Returns error unless file is
* attached to a device. The returned object works like a packet socket, it
* can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
* holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
struct tun_file *tfile;
if (file->f_op != &tun_fops)
return ERR_PTR(-EINVAL);
tfile = file->private_data;
if (!tfile)
return ERR_PTR(-EBADFD);
return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);
struct skb_array *tun_get_skb_array(struct file *file)
{
struct tun_file *tfile;
if (file->f_op != &tun_fops)
return ERR_PTR(-EINVAL);
tfile = file->private_data;
if (!tfile)
return ERR_PTR(-EBADFD);
return &tfile->tx_array;
}
EXPORT_SYMBOL_GPL(tun_get_skb_array);
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_623_0 |
crossvul-cpp_data_bad_3060_15 | /* Request a key from userspace
*
* Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* See Documentation/security/keys-request-key.txt
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/keyctl.h>
#include <linux/slab.h>
#include "internal.h"
#define key_negative_timeout 60 /* default timeout on a negative key's existence */
/*
 * wait_on_bit() sleep function for uninterruptible waiting: yield the
 * CPU and tell the caller to keep waiting (never aborts the wait).
 */
static int key_wait_bit(void *flags)
{
	schedule();
	return 0;
}
/*
 * wait_on_bit() sleep function for interruptible waiting: yield the
 * CPU, then abort the wait with -ERESTARTSYS if a signal is pending.
 */
static int key_wait_bit_intr(void *flags)
{
	schedule();
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
/**
 * complete_request_key - Complete the construction of a key.
 * @cons: The key construction record.
 * @error: The success or failure of the construction.
 *
 * Complete the attempt to construct a key. On success the authorisation
 * key is revoked; on failure the target key is negatively instantiated
 * (so lookups briefly see the failure instead of re-upcalling). Either
 * way the record's key references are dropped and the record is freed.
 */
void complete_request_key(struct key_construction *cons, int error)
{
	kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error);

	if (error >= 0)
		key_revoke(cons->authkey);
	else
		key_negate_and_link(cons->key, key_negative_timeout, NULL,
				    cons->authkey);

	key_put(cons->key);
	key_put(cons->authkey);
	kfree(cons);
}
EXPORT_SYMBOL(complete_request_key);
/*
 * Pre-exec hook for the usermode helper: the freshly forked helper
 * kthread is given the session keyring stashed in info->data before it
 * execs, so install_session_keyring_to_cred() can be called directly.
 */
static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
{
	return install_session_keyring_to_cred(cred, info->data);
}
/*
 * Cleanup hook for the usermode helper: drop the keyring reference
 * taken when the helper was set up.
 */
static void umh_keys_cleanup(struct subprocess_info *info)
{
	struct key *session_keyring = info->data;

	key_put(session_keyring);
}
/*
 * Spawn a usermode helper whose session keyring is @session_keyring.
 * A reference on the keyring is taken for the helper and released by
 * umh_keys_cleanup() when the helper finishes.
 */
static int call_usermodehelper_keys(char *path, char **argv, char **envp,
					struct key *session_keyring, int wait)
{
	struct subprocess_info *info =
		call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
					  umh_keys_init, umh_keys_cleanup,
					  session_keyring);

	if (!info)
		return -ENOMEM;

	key_get(session_keyring);
	return call_usermodehelper_exec(info, wait);
}
/*
 * Request userspace finish the construction of a key
 * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
 *
 * The helper runs in a freshly allocated session keyring that carries
 * the authorisation key, permitting it to instantiate the target key.
 * Always finishes by calling complete_request_key(), success or not.
 */
static int call_sbin_request_key(struct key_construction *cons,
				 const char *op,
				 void *aux)
{
	const struct cred *cred = current_cred();
	key_serial_t prkey, sskey;
	struct key *key = cons->key, *authkey = cons->authkey, *keyring,
		*session;
	char *argv[9], *envp[3], uid_str[12], gid_str[12];
	char key_str[12], keyring_str[3][12];
	char desc[20];
	int ret, i;
	kenter("{%d},{%d},%s", key->serial, authkey->serial, op);
	/* make sure the caller's user and user-session keyrings exist */
	ret = install_user_keyrings();
	if (ret < 0)
		goto error_alloc;
	/* allocate a new session keyring, named after the target key */
	sprintf(desc, "_req.%u", key->serial);
	/* take our own credential reference across keyring_alloc() */
	cred = get_current_cred();
	keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred,
				KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
				KEY_ALLOC_QUOTA_OVERRUN, NULL);
	put_cred(cred);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto error_alloc;
	}
	/* attach the auth key to the session keyring */
	ret = key_link(keyring, authkey);
	if (ret < 0)
		goto error_link;
	/* record the UID and GID */
	sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid));
	sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid));
	/* we say which key is under construction */
	sprintf(key_str, "%d", key->serial);
	/* we specify the process's default keyrings */
	sprintf(keyring_str[0], "%d",
		cred->thread_keyring ? cred->thread_keyring->serial : 0);
	prkey = 0;
	if (cred->process_keyring)
		prkey = cred->process_keyring->serial;
	sprintf(keyring_str[1], "%d", prkey);
	/* the session keyring pointer is RCU-managed; fall back to the
	 * user's session keyring if the process has none */
	rcu_read_lock();
	session = rcu_dereference(cred->session_keyring);
	if (!session)
		session = cred->user->session_keyring;
	sskey = session->serial;
	rcu_read_unlock();
	sprintf(keyring_str[2], "%d", sskey);
	/* set up a minimal environment */
	i = 0;
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[i] = NULL;
	/* set up the argument list */
	i = 0;
	argv[i++] = "/sbin/request-key";
	argv[i++] = (char *) op;
	argv[i++] = key_str;
	argv[i++] = uid_str;
	argv[i++] = gid_str;
	argv[i++] = keyring_str[0];
	argv[i++] = keyring_str[1];
	argv[i++] = keyring_str[2];
	argv[i] = NULL;
	/* do it */
	ret = call_usermodehelper_keys(argv[0], argv, envp, keyring,
				       UMH_WAIT_PROC);
	kdebug("usermode -> 0x%x", ret);
	if (ret >= 0) {
		/* ret is the exit/wait code */
		if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) ||
		    key_validate(key) < 0)
			ret = -ENOKEY;
		else
			/* ignore any errors from userspace if the key was
			 * instantiated */
			ret = 0;
	}
error_link:
	key_put(keyring);
error_alloc:
	/* always settle the construction record, success or failure */
	complete_request_key(cons, ret);
	kleave(" = %d", ret);
	return ret;
}
/*
 * Call out to userspace for key construction.
 *
 * An authorisation key is created to permit the upcall to instantiate
 * the target key, then the key type's request_key actor (default:
 * call_sbin_request_key) is invoked. The actor is responsible for
 * calling complete_request_key().
 *
 * Program failure is ignored in favour of key status.
 */
static int construct_key(struct key *key, const void *callout_info,
			 size_t callout_len, void *aux,
			 struct key *dest_keyring)
{
	struct key_construction *cons;
	request_key_actor_t actor;
	struct key *authkey;
	int ret;
	kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
	cons = kmalloc(sizeof(*cons), GFP_KERNEL);
	if (!cons)
		return -ENOMEM;
	/* allocate an authorisation key */
	authkey = request_key_auth_new(key, callout_info, callout_len,
				       dest_keyring);
	if (IS_ERR(authkey)) {
		kfree(cons);
		ret = PTR_ERR(authkey);
		authkey = NULL;
	} else {
		/* the record owns these references; they are dropped by
		 * complete_request_key() */
		cons->authkey = key_get(authkey);
		cons->key = key_get(key);
		/* make the call */
		actor = call_sbin_request_key;
		if (key->type->request_key)
			actor = key->type->request_key;
		ret = actor(cons, "create", aux);
		/* check that the actor called complete_request_key() prior to
		 * returning an error */
		WARN_ON(ret < 0 &&
			!test_bit(KEY_FLAG_REVOKED, &authkey->flags));
		key_put(authkey);
	}
	kleave(" = %d", ret);
	return ret;
}
/*
 * Get the appropriate destination keyring for the request.
 *
 * The keyring selected is returned with an extra reference upon it which the
 * caller must release.
 *
 * If the caller supplied no keyring, the thread's jit_keyring setting
 * picks a default; the switch deliberately falls through successive
 * cases until a keyring that actually exists is found.
 *
 * NOTE(review): no write-permission check is applied to the defaulted
 * destination keyring here -- confirm against the keyrings permission
 * model whether the requester should be allowed to link into it.
 */
static void construct_get_dest_keyring(struct key **_dest_keyring)
{
	struct request_key_auth *rka;
	const struct cred *cred = current_cred();
	struct key *dest_keyring = *_dest_keyring, *authkey;
	kenter("%p", dest_keyring);
	/* find the appropriate keyring */
	if (dest_keyring) {
		/* the caller supplied one */
		key_get(dest_keyring);
	} else {
		/* use a default keyring; falling through the cases until we
		 * find one that we actually have */
		switch (cred->jit_keyring) {
		case KEY_REQKEY_DEFL_DEFAULT:
		case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
			/* if we're servicing another request_key(), reuse
			 * its destination keyring (unless revoked) */
			if (cred->request_key_auth) {
				authkey = cred->request_key_auth;
				down_read(&authkey->sem);
				rka = authkey->payload.data;
				if (!test_bit(KEY_FLAG_REVOKED,
					      &authkey->flags))
					dest_keyring =
						key_get(rka->dest_keyring);
				up_read(&authkey->sem);
				if (dest_keyring)
					break;
			}
			/* fall through */
		case KEY_REQKEY_DEFL_THREAD_KEYRING:
			dest_keyring = key_get(cred->thread_keyring);
			if (dest_keyring)
				break;
			/* fall through */
		case KEY_REQKEY_DEFL_PROCESS_KEYRING:
			dest_keyring = key_get(cred->process_keyring);
			if (dest_keyring)
				break;
			/* fall through */
		case KEY_REQKEY_DEFL_SESSION_KEYRING:
			rcu_read_lock();
			dest_keyring = key_get(
				rcu_dereference(cred->session_keyring));
			rcu_read_unlock();
			if (dest_keyring)
				break;
			/* fall through */
		case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
			dest_keyring =
				key_get(cred->user->session_keyring);
			break;
		case KEY_REQKEY_DEFL_USER_KEYRING:
			dest_keyring = key_get(cred->user->uid_keyring);
			break;
		case KEY_REQKEY_DEFL_GROUP_KEYRING:
		default:
			BUG();
		}
	}
	*_dest_keyring = dest_keyring;
	kleave(" [dk %d]", key_serial(dest_keyring));
	return;
}
/*
 * Allocate a new key in under-construction state and attempt to link it in to
 * the requested keyring.
 *
 * May return a key that's already under construction instead if there was a
 * race between two thread calling request_key().
 *
 * Returns 0 with *_key set to the newly allocated key, -EINPROGRESS with
 * *_key set to an existing key that won the race, or a negative error.
 */
static int construct_alloc_key(struct keyring_search_context *ctx,
			       struct key *dest_keyring,
			       unsigned long flags,
			       struct key_user *user,
			       struct key **_key)
{
	struct assoc_array_edit *edit;
	struct key *key;
	key_perm_t perm;
	key_ref_t key_ref;
	int ret;
	kenter("%s,%s,,,",
	       ctx->index_key.type->name, ctx->index_key.description);
	*_key = NULL;
	/* serialise construction per requesting user */
	mutex_lock(&user->cons_lock);
	/* derive the new key's permissions from the type's abilities */
	perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
	perm |= KEY_USR_VIEW;
	if (ctx->index_key.type->read)
		perm |= KEY_POS_READ;
	if (ctx->index_key.type == &key_type_keyring ||
	    ctx->index_key.type->update)
		perm |= KEY_POS_WRITE;
	key = key_alloc(ctx->index_key.type, ctx->index_key.description,
			ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred,
			perm, flags);
	if (IS_ERR(key))
		goto alloc_failed;
	/* mark under construction so waiters block until instantiated */
	set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
	if (dest_keyring) {
		/* pre-allocate the link so the later __key_link() under
		 * key_construction_mutex cannot fail */
		ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
		if (ret < 0)
			goto link_prealloc_failed;
	}
	/* attach the key to the destination keyring under lock, but we do need
	 * to do another check just in case someone beat us to it whilst we
	 * waited for locks */
	mutex_lock(&key_construction_mutex);
	key_ref = search_process_keyrings(ctx);
	if (!IS_ERR(key_ref))
		goto key_already_present;
	if (dest_keyring)
		__key_link(key, &edit);
	mutex_unlock(&key_construction_mutex);
	if (dest_keyring)
		__key_link_end(dest_keyring, &ctx->index_key, edit);
	mutex_unlock(&user->cons_lock);
	*_key = key;
	kleave(" = 0 [%d]", key_serial(key));
	return 0;
	/* the key is now present - we tell the caller that we found it by
	 * returning -EINPROGRESS */
key_already_present:
	/* drop our unneeded allocation and link the winner instead */
	key_put(key);
	mutex_unlock(&key_construction_mutex);
	key = key_ref_to_ptr(key_ref);
	if (dest_keyring) {
		ret = __key_link_check_live_key(dest_keyring, key);
		if (ret == 0)
			__key_link(key, &edit);
		__key_link_end(dest_keyring, &ctx->index_key, edit);
		if (ret < 0)
			goto link_check_failed;
	}
	mutex_unlock(&user->cons_lock);
	*_key = key;
	kleave(" = -EINPROGRESS [%d]", key_serial(key));
	return -EINPROGRESS;
link_check_failed:
	mutex_unlock(&user->cons_lock);
	key_put(key);
	kleave(" = %d [linkcheck]", ret);
	return ret;
link_prealloc_failed:
	mutex_unlock(&user->cons_lock);
	kleave(" = %d [prelink]", ret);
	return ret;
alloc_failed:
	mutex_unlock(&user->cons_lock);
	kleave(" = %ld", PTR_ERR(key));
	return PTR_ERR(key);
}
/*
 * Commence key construction.
 *
 * Resolves the destination keyring, allocates the under-construction
 * key (or joins an in-flight construction by another thread) and, if
 * we allocated it, kicks off the userspace upcall. Construction
 * concurrency is throttled per requesting user via the key_user record.
 */
static struct key *construct_key_and_link(struct keyring_search_context *ctx,
					  const char *callout_info,
					  size_t callout_len,
					  void *aux,
					  struct key *dest_keyring,
					  unsigned long flags)
{
	struct key_user *user;
	struct key *key;
	int ret;
	kenter("");
	user = key_user_lookup(current_fsuid());
	if (!user)
		return ERR_PTR(-ENOMEM);
	construct_get_dest_keyring(&dest_keyring);
	ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
	key_user_put(user);
	if (ret == 0) {
		/* we allocated the key: upcall to construct it */
		ret = construct_key(key, callout_info, callout_len, aux,
				    dest_keyring);
		if (ret < 0) {
			kdebug("cons failed");
			goto construction_failed;
		}
	} else if (ret == -EINPROGRESS) {
		/* another thread is already constructing this key */
		ret = 0;
	} else {
		goto couldnt_alloc_key;
	}
	key_put(dest_keyring);
	kleave(" = key %d", key_serial(key));
	return key;
construction_failed:
	/* negate the key so concurrent waiters see the failure */
	key_negate_and_link(key, key_negative_timeout, NULL, NULL);
	key_put(key);
couldnt_alloc_key:
	key_put(dest_keyring);
	kleave(" = %d", ret);
	return ERR_PTR(ret);
}
/**
 * request_key_and_link - Request a key and cache it in a keyring.
 * @type: The type of key we want.
 * @description: The searchable description of the key.
 * @callout_info: The data to pass to the instantiation upcall (or NULL).
 * @callout_len: The length of callout_info.
 * @aux: Auxiliary data for the upcall.
 * @dest_keyring: Where to cache the key.
 * @flags: Flags to key_alloc().
 *
 * A key matching the specified criteria is searched for in the process's
 * keyrings and returned with its usage count incremented if found.  Otherwise,
 * if callout_info is not NULL, a key will be allocated and some service
 * (probably in userspace) will be asked to instantiate it.
 *
 * If successfully found or created, the key will be linked to the destination
 * keyring if one is provided.
 *
 * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED
 * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was
 * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT
 * if insufficient key quota was available to create a new key; or -ENOMEM if
 * insufficient memory was available.
 *
 * If the returned key was created, then it may still be under construction,
 * and wait_for_key_construction() should be used to wait for that to complete.
 */
struct key *request_key_and_link(struct key_type *type,
				 const char *description,
				 const void *callout_info,
				 size_t callout_len,
				 void *aux,
				 struct key *dest_keyring,
				 unsigned long flags)
{
	struct keyring_search_context ctx = {
		.index_key.type = type,
		.index_key.description = description,
		.cred = current_cred(),
		/* NOTE(review): assumes every key type supplies ->match
		 * here -- confirm against key_type registration rules */
		.match_data.cmp = type->match,
		.match_data.raw_data = description,
		.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
	};
	struct key *key;
	key_ref_t key_ref;
	int ret;
	kenter("%s,%s,%p,%zu,%p,%p,%lx",
	       ctx.index_key.type->name, ctx.index_key.description,
	       callout_info, callout_len, aux, dest_keyring, flags);
	/* let the key type preparse the match data if it wants */
	if (type->match_preparse) {
		ret = type->match_preparse(&ctx.match_data);
		if (ret < 0) {
			key = ERR_PTR(ret);
			goto error;
		}
	}
	/* search all the process keyrings for a key */
	key_ref = search_process_keyrings(&ctx);
	if (!IS_ERR(key_ref)) {
		/* found an existing key: optionally cache it */
		key = key_ref_to_ptr(key_ref);
		if (dest_keyring) {
			construct_get_dest_keyring(&dest_keyring);
			ret = key_link(dest_keyring, key);
			key_put(dest_keyring);
			if (ret < 0) {
				key_put(key);
				key = ERR_PTR(ret);
				goto error_free;
			}
		}
	} else if (PTR_ERR(key_ref) != -EAGAIN) {
		/* hard search error (e.g. -EACCES) */
		key = ERR_CAST(key_ref);
	} else {
		/* the search failed, but the keyrings were searchable, so we
		 * should consult userspace if we can */
		key = ERR_PTR(-ENOKEY);
		if (!callout_info)
			goto error_free;
		key = construct_key_and_link(&ctx, callout_info, callout_len,
					     aux, dest_keyring, flags);
	}
error_free:
	if (type->match_free)
		type->match_free(&ctx.match_data);
error:
	kleave(" = %p", key);
	return key;
}
/**
 * wait_for_key_construction - Wait for construction of a key to complete
 * @key: The key being waited for.
 * @intr: Whether to wait interruptibly.
 *
 * Wait for a key to finish being constructed.
 *
 * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY
 * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was
 * revoked or expired.
 */
int wait_for_key_construction(struct key *key, bool intr)
{
	int ret;
	/* sleep until the instantiator clears KEY_FLAG_USER_CONSTRUCT */
	ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
			  intr ? key_wait_bit_intr : key_wait_bit,
			  intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
	if (ret < 0)
		return ret;
	/* NOTE(review): the negative flag and reject_error are read here
	 * with only an smp_rmb() pairing against the instantiation side;
	 * confirm this ordering is sufficient against concurrent
	 * updates of the key's state. */
	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
		smp_rmb();
		return key->type_data.reject_error;
	}
	return key_validate(key);
}
EXPORT_SYMBOL(wait_for_key_construction);
/**
 * request_key - Request a key and wait for construction
 * @type: Type of key.
 * @description: The searchable description of the key.
 * @callout_info: The data to pass to the instantiation upcall (or NULL).
 *
 * As for request_key_and_link() except that it does not add the returned key
 * to a keyring if found, new keys are always allocated in the user's quota,
 * the callout_info must be a NUL-terminated string and no auxiliary data can
 * be passed.  Construction of a new key is then awaited uninterruptibly,
 * as per wait_for_key_construction().
 */
struct key *request_key(struct key_type *type,
			const char *description,
			const char *callout_info)
{
	size_t callout_len = callout_info ? strlen(callout_info) : 0;
	struct key *key;
	int ret;

	key = request_key_and_link(type, description, callout_info, callout_len,
				   NULL, NULL, KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(key))
		return key;

	ret = wait_for_key_construction(key, false);
	if (ret < 0) {
		key_put(key);
		key = ERR_PTR(ret);
	}
	return key;
}
EXPORT_SYMBOL(request_key);
/**
 * request_key_with_auxdata - Request a key with auxiliary data for the upcaller
 * @type: The type of key we want.
 * @description: The searchable description of the key.
 * @callout_info: The data to pass to the instantiation upcall (or NULL).
 * @callout_len: The length of callout_info.
 * @aux: Auxiliary data for the upcall.
 *
 * As for request_key_and_link() except that it does not add the returned key
 * to a keyring if found and new keys are always allocated in the user's
 * quota.  Construction of a new key is then awaited uninterruptibly, as per
 * wait_for_key_construction().
 */
struct key *request_key_with_auxdata(struct key_type *type,
				     const char *description,
				     const void *callout_info,
				     size_t callout_len,
				     void *aux)
{
	struct key *key;
	int ret;

	key = request_key_and_link(type, description, callout_info, callout_len,
				   aux, NULL, KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(key))
		return key;

	ret = wait_for_key_construction(key, false);
	if (ret < 0) {
		key_put(key);
		key = ERR_PTR(ret);
	}
	return key;
}
EXPORT_SYMBOL(request_key_with_auxdata);
/**
 * request_key_async - Request a key (allow async construction)
 * @type: Type of key.
 * @description: The searchable description of the key.
 * @callout_info: The data to pass to the instantiation upcall (or NULL).
 * @callout_len: The length of callout_info.
 *
 * As for request_key_and_link() except that it does not add the returned key
 * to a keyring if found, new keys are always allocated in the user's quota and
 * no auxiliary data can be passed.
 *
 * The caller should call wait_for_key_construction() to wait for the
 * completion of the returned key if it is still undergoing construction.
 */
struct key *request_key_async(struct key_type *type,
			      const char *description,
			      const void *callout_info,
			      size_t callout_len)
{
	return request_key_and_link(type, description, callout_info,
				    callout_len, NULL, NULL,
				    KEY_ALLOC_IN_QUOTA);
}
EXPORT_SYMBOL(request_key_async);
/**
 * request_key_async_with_auxdata - Request a key with auxiliary upcall data
 *                                  (allow async construction)
 * @type: Type of key.
 * @description: The searchable description of the key.
 * @callout_info: The data to pass to the instantiation upcall (or NULL).
 * @callout_len: The length of callout_info.
 * @aux: Auxiliary data for the upcall.
 *
 * As for request_key_and_link() except that it does not add the returned key
 * to a keyring if found and new keys are always allocated in the user's quota.
 *
 * The caller should call wait_for_key_construction() to wait for the
 * completion of the returned key if it is still undergoing construction.
 */
struct key *request_key_async_with_auxdata(struct key_type *type,
					   const char *description,
					   const void *callout_info,
					   size_t callout_len,
					   void *aux)
{
	return request_key_and_link(type, description, callout_info,
				    callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA);
}
EXPORT_SYMBOL(request_key_async_with_auxdata);
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3060_15 |
crossvul-cpp_data_bad_5485_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO PPPP EEEEE RRRR TTTTT Y Y %
% P P R R O O P P E R R T Y Y %
% PPPP RRRR O O PPPP EEE RRRR T Y %
% P R R O O P E R R T Y %
% P R R OOO P EEEEE R R T Y %
% %
% %
% MagickCore Property Methods %
% %
% Software Design %
% Cristy %
% March 2000 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/constitute.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/layer.h"
#include "MagickCore/locale-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/montage.h"
#include "MagickCore/option.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/signature.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/token.h"
#include "MagickCore/token-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS2_LCMS2_H)
#include <lcms2/lcms2.h>
#elif defined(MAGICKCORE_HAVE_LCMS2_H)
#include "lcms2.h"
#elif defined(MAGICKCORE_HAVE_LCMS_LCMS_H)
#include <lcms/lcms.h>
#else
#include "lcms.h"
#endif
#endif
/*
Define declarations.
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(LCMS_VERSION) && (LCMS_VERSION < 2000)
#define cmsUInt32Number DWORD
#endif
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o p e r t i e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProperties() clones all the image properties to another image.
%
% The format of the CloneImageProperties method is:
%
% MagickBooleanType CloneImageProperties(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProperties(Image *image,
  const Image *clone_image)
{
  /*
    Copy every per-image setting and the property splay-tree from
    clone_image into image.  Returns MagickTrue unconditionally.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      clone_image->filename);
  /*
    Filenames and format/colorimetry settings.
  */
  (void) CopyMagickString(image->filename,clone_image->filename,
    MagickPathExtent);
  (void) CopyMagickString(image->magick_filename,clone_image->magick_filename,
    MagickPathExtent);
  image->compression=clone_image->compression;
  image->quality=clone_image->quality;
  image->depth=clone_image->depth;
  image->alpha_color=clone_image->alpha_color;
  image->background_color=clone_image->background_color;
  image->border_color=clone_image->border_color;
  image->transparent_color=clone_image->transparent_color;
  image->gamma=clone_image->gamma;
  image->chromaticity=clone_image->chromaticity;
  image->rendering_intent=clone_image->rendering_intent;
  image->black_point_compensation=clone_image->black_point_compensation;
  image->units=clone_image->units;
  /*
    Montage/directory are cleared, not cloned.  NOTE(review): any prior
    image->montage/directory strings are not freed here — presumably the
    caller owns them; confirm against callers before changing.
  */
  image->montage=(char *) NULL;
  image->directory=(char *) NULL;
  (void) CloneString(&image->geometry,clone_image->geometry);
  image->offset=clone_image->offset;
  image->resolution.x=clone_image->resolution.x;
  image->resolution.y=clone_image->resolution.y;
  image->page=clone_image->page;
  image->tile_offset=clone_image->tile_offset;
  image->extract_info=clone_image->extract_info;
  image->filter=clone_image->filter;
  image->fuzz=clone_image->fuzz;
  image->intensity=clone_image->intensity;
  image->interlace=clone_image->interlace;
  image->interpolate=clone_image->interpolate;
  image->endian=clone_image->endian;
  image->gravity=clone_image->gravity;
  image->compose=clone_image->compose;
  image->orientation=clone_image->orientation;
  /*
    Animation/sequence settings.
  */
  image->scene=clone_image->scene;
  image->dispose=clone_image->dispose;
  image->delay=clone_image->delay;
  image->ticks_per_second=clone_image->ticks_per_second;
  image->iterations=clone_image->iterations;
  image->total_colors=clone_image->total_colors;
  image->taint=clone_image->taint;
  image->progress_monitor=clone_image->progress_monitor;
  image->client_data=clone_image->client_data;
  image->start_loop=clone_image->start_loop;
  image->error=clone_image->error;
  image->signature=clone_image->signature;
  /*
    Replace image's property tree with a deep copy of clone_image's;
    keys and values are duplicated via ConstantString.
  */
  if (clone_image->properties != (void *) NULL)
    {
      if (image->properties != (void *) NULL)
        DestroyImageProperties(image);
      image->properties=CloneSplayTree((SplayTreeInfo *)
        clone_image->properties,(void *(*)(void *)) ConstantString,
        (void *(*)(void *)) ConstantString);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e f i n e I m a g e P r o p e r t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineImageProperty() associates an assignment string of the form
%  "key=value" with an artifact or options. It is equivalent to
% SetImageProperty()
%
% The format of the DefineImageProperty method is:
%
% MagickBooleanType DefineImageProperty(Image *image,const char *property,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o property: the image property.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DefineImageProperty(Image *image,
  const char *property,ExceptionInfo *exception)
{
  /*
    Split a "key=value" assignment string at the first '=' and store it as
    an image property.  A string without '=' sets the key to an empty
    value.
  */
  char
    key[MagickPathExtent],
    value[MagickPathExtent];
  register char
    *q;
  assert(image != (Image *) NULL);
  assert(property != (const char *) NULL);
  (void) CopyMagickString(key,property,MagickPathExtent-1);
  q=key;
  while ((*q != '\0') && (*q != '='))
    q++;
  *value='\0';
  if (*q == '=')
    (void) CopyMagickString(value,q+1,MagickPathExtent);
  *q='\0';
  return(SetImageProperty(image,key,value,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o p e r t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProperty() deletes an image property.
%
% The format of the DeleteImageProperty method is:
%
% MagickBooleanType DeleteImageProperty(Image *image,const char *property)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o property: the image property.
%
*/
MagickExport MagickBooleanType DeleteImageProperty(Image *image,
  const char *property)
{
  /*
    Remove the named property from the image's property splay-tree.
    Returns MagickFalse when the image has no properties or the key is
    absent.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->properties != (void *) NULL)
    return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->properties,
      property));
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o p e r t i e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProperties() destroys all properties and associated memory
% attached to the given image.
%
%  The format of the DestroyImageProperties method is:
%
% void DestroyImageProperties(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageProperties(Image *image)
{
  /*
    Release the property splay-tree attached to the image (if any) and
    clear the reference.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->properties == (void *) NULL)
    return;
  image->properties=(void *) DestroySplayTree((SplayTreeInfo *)
    image->properties);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F o r m a t I m a g e P r o p e r t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FormatImageProperty() permits formatted property/value pairs to be saved as
% an image property.
%
% The format of the FormatImageProperty method is:
%
% MagickBooleanType FormatImageProperty(Image *image,const char *property,
% const char *format,...)
%
% A description of each parameter follows.
%
% o image: The image.
%
% o property: The attribute property.
%
% o format: A string describing the format to use to write the remaining
% arguments.
%
*/
MagickExport MagickBooleanType FormatImageProperty(Image *image,
  const char *property,const char *format,...)
{
  /*
    Render the printf-style format and its arguments into a local buffer,
    then store the result as an image property under the given name.
  */
  char
    value[MagickPathExtent];
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  ssize_t
    count;
  va_list
    operands;
  va_start(operands,format);
  count=FormatLocaleStringList(value,MagickPathExtent,format,operands);
  (void) count;
  va_end(operands);
  exception=AcquireExceptionInfo();
  status=SetImageProperty(image,property,value,exception);
  exception=DestroyExceptionInfo(exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o p e r t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProperty() gets a value associated with an image property.
%
% This includes, profile prefixes, such as "exif:", "iptc:" and "8bim:"
%  It does not handle non-profile prefixes, such as "fx:", "option:", or
% "artifact:".
%
%  The returned string is stored as a property of the same name for faster
% lookup later. It should NOT be freed by the caller.
%
% The format of the GetImageProperty method is:
%
% const char *GetImageProperty(const Image *image,const char *key,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o key: the key.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Forward declarations: helpers used by Get8BIMProperty below to render an
  8BIM clipping-path resource as PostScript or SVG text.
*/
static char
  *TracePSClippath(const unsigned char *,size_t),
  *TraceSVGClippath(const unsigned char *,size_t,const size_t,
    const size_t);
static MagickBooleanType GetIPTCProperty(const Image *image,const char *key,
  ExceptionInfo *exception)
{
  /*
    Extract IPTC records requested as "IPTC:<dataset>:<record>" from the
    image's IPTC (or 8BIM) profile, join multiple matches with ';', and
    cache the result as an image property.  Returns MagickTrue when at
    least one matching record was found.
  */
  char
    *attribute,
    *message;
  const StringInfo
    *profile;
  long
    count,
    dataset,
    record;
  register ssize_t
    i;
  size_t
    length;
  profile=GetImageProfile(image,"iptc");
  if (profile == (StringInfo *) NULL)
    profile=GetImageProfile(image,"8bim");
  if (profile == (StringInfo *) NULL)
    return(MagickFalse);
  count=sscanf(key,"IPTC:%ld:%ld",&dataset,&record);
  if (count != 2)
    return(MagickFalse);
  attribute=(char *) NULL;
  for (i=0; i < (ssize_t) GetStringInfoLength(profile); i+=(ssize_t) length)
  {
    length=1;
    if ((ssize_t) GetStringInfoDatum(profile)[i] != 0x1c)
      continue;
    /*
      Each record header is 5 bytes (0x1c marker, dataset, record, 2-byte
      length).  Bail out on a truncated header rather than reading past
      the end of the profile buffer.
    */
    if ((i+5) >= (ssize_t) GetStringInfoLength(profile))
      break;
    length=(size_t) (GetStringInfoDatum(profile)[i+3] << 8);
    length|=GetStringInfoDatum(profile)[i+4];
    if (((long) GetStringInfoDatum(profile)[i+1] == dataset) &&
        ((long) GetStringInfoDatum(profile)[i+2] == record))
      {
        /*
          Clamp the declared record length to the bytes actually present
          so the copy below cannot read beyond the profile buffer.
        */
        if (length > (size_t) ((ssize_t) GetStringInfoLength(profile)-i-5))
          length=(size_t) ((ssize_t) GetStringInfoLength(profile)-i-5);
        message=(char *) NULL;
        if (~length >= 1)
          message=(char *) AcquireQuantumMemory(length+1UL,sizeof(*message));
        if (message != (char *) NULL)
          {
            (void) CopyMagickString(message,(char *) GetStringInfoDatum(
              profile)+i+5,length+1);
            (void) ConcatenateString(&attribute,message);
            (void) ConcatenateString(&attribute,";");
            message=DestroyString(message);
          }
      }
    i+=5;
  }
  if ((attribute == (char *) NULL) || (*attribute == ';'))
    {
      if (attribute != (char *) NULL)
        attribute=DestroyString(attribute);
      return(MagickFalse);
    }
  /*
    Drop the trailing ';' separator and cache the value for later lookups.
  */
  attribute[strlen(attribute)-1]='\0';
  (void) SetImageProperty((Image *) image,key,(const char *) attribute,
    exception);
  attribute=DestroyString(attribute);
  return(MagickTrue);
}
static inline int ReadPropertyByte(const unsigned char **p,size_t *length)
{
  /*
    Consume one byte from the cursor *p, decrementing the remaining byte
    count; returns EOF when the buffer is exhausted.
  */
  int
    byte;
  if (*length == 0)
    return(EOF);
  byte=(int) **p;
  (*p)++;
  (*length)--;
  return(byte);
}
static inline signed int ReadPropertyMSBLong(const unsigned char **p,
  size_t *length)
{
  /*
    Read a 4-byte big-endian quantity at the cursor *p, advancing the
    cursor and decrementing *length.  Returns -1 (without consuming
    anything) when fewer than 4 bytes remain; the value is reinterpreted
    as signed through a union to avoid implementation-defined conversion.
  */
  union
  {
    unsigned int
      unsigned_value;
    signed int
      signed_value;
  } quantum;
  unsigned int
    value;
  if (*length < 4)
    return(-1);
  value=(unsigned int) (*p)[0] << 24;
  value|=(unsigned int) (*p)[1] << 16;
  value|=(unsigned int) (*p)[2] << 8;
  value|=(unsigned int) (*p)[3];
  *p+=4;
  *length-=4;
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}
static inline signed short ReadPropertyMSBShort(const unsigned char **p,
  size_t *length)
{
  /*
    Read a 2-byte big-endian quantity at the cursor *p, advancing the
    cursor and decrementing *length.  Returns (unsigned short) ~0
    (without consuming anything) when fewer than 2 bytes remain; the
    value is reinterpreted as signed through a union.
  */
  union
  {
    unsigned short
      unsigned_value;
    signed short
      signed_value;
  } quantum;
  unsigned short
    value;
  if (*length < 2)
    return((unsigned short) ~0);
  value=(unsigned short) ((unsigned int) (*p)[0] << 8);
  value|=(unsigned short) (*p)[1];
  *p+=2;
  *length-=2;
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}
static MagickBooleanType Get8BIMProperty(const Image *image,const char *key,
  ExceptionInfo *exception)
{
  /*
    Look up a Photoshop 8BIM image resource requested as
    "8BIM:<start>,<stop>[:<name>[\n<format>]]" and cache it as an image
    property.  Resources whose id falls between 2000 and 2998 are treated
    as clipping paths and rendered as SVG (default) or PostScript text;
    all other ids are stored verbatim.  Returns MagickTrue when a
    matching resource was found and stored.
  */
  char
    *attribute,
    format[MagickPathExtent],
    name[MagickPathExtent],
    *resource;
  const StringInfo
    *profile;
  const unsigned char
    *info;
  long
    start,
    stop;
  MagickBooleanType
    status;
  register ssize_t
    i;
  size_t
    length;
  ssize_t
    count,
    id,
    sub_number;
  /*
    There are no newlines in path names, so it's safe as terminator.
  */
  profile=GetImageProfile(image,"8bim");
  if (profile == (StringInfo *) NULL)
    return(MagickFalse);
  count=(ssize_t) sscanf(key,"8BIM:%ld,%ld:%1024[^\n]\n%1024[^\n]",&start,&stop,
    name,format);
  if ((count != 2) && (count != 3) && (count != 4))
    return(MagickFalse);
  if (count < 4)
    (void) CopyMagickString(format,"SVG",MagickPathExtent);
  if (count < 3)
    *name='\0';
  /*
    A name of "#N" selects the N-th matching resource instead of matching
    by resource name.
  */
  sub_number=1;
  if (*name == '#')
    sub_number=(ssize_t) StringToLong(&name[1]);
  sub_number=MagickMax(sub_number,1L);
  resource=(char *) NULL;
  status=MagickFalse;
  length=GetStringInfoLength(profile);
  info=GetStringInfoDatum(profile);
  while ((length > 0) && (status == MagickFalse))
  {
    /*
      Scan forward for the "8BIM" resource-block signature; ReadPropertyByte
      consumes one byte at a time and returns EOF once length hits zero.
    */
    if (ReadPropertyByte(&info,&length) != (unsigned char) '8')
      continue;
    if (ReadPropertyByte(&info,&length) != (unsigned char) 'B')
      continue;
    if (ReadPropertyByte(&info,&length) != (unsigned char) 'I')
      continue;
    if (ReadPropertyByte(&info,&length) != (unsigned char) 'M')
      continue;
    id=(ssize_t) ReadPropertyMSBShort(&info,&length);
    if (id < (ssize_t) start)
      continue;
    if (id > (ssize_t) stop)
      continue;
    if (resource != (char *) NULL)
      resource=DestroyString(resource);
    /*
      The resource name is a Pascal string: one length byte followed by
      that many characters.
    */
    count=(ssize_t) ReadPropertyByte(&info,&length);
    if ((count != 0) && ((size_t) count <= length))
      {
        resource=(char *) NULL;
        if (~((size_t) count) >= (MagickPathExtent-1))
          resource=(char *) AcquireQuantumMemory((size_t) count+
            MagickPathExtent,sizeof(*resource));
        if (resource != (char *) NULL)
          {
            for (i=0; i < (ssize_t) count; i++)
              resource[i]=(char) ReadPropertyByte(&info,&length);
            resource[count]='\0';
          }
      }
    /*
      Name field is padded to an even total size; consume the pad byte.
    */
    if ((count & 0x01) == 0)
      (void) ReadPropertyByte(&info,&length);
    /*
      4-byte big-endian size of the resource data; reject negative or
      over-long sizes and stop scanning.
    */
    count=(ssize_t) ReadPropertyMSBLong(&info,&length);
    if ((count < 0) || ((size_t) count > length))
      {
        length=0;
        continue;
      }
    if ((*name != '\0') && (*name != '#'))
      if ((resource == (char *) NULL) || (LocaleCompare(name,resource) != 0))
        {
          /*
            No name match, scroll forward and try next.
          */
          info+=count;
          length-=MagickMin(count,(ssize_t) length);
          continue;
        }
    if ((*name == '#') && (sub_number != 1))
      {
        /*
          No numbered match, scroll forward and try next.
        */
        sub_number--;
        info+=count;
        length-=MagickMin(count,(ssize_t) length);
        continue;
      }
    /*
      We have the resource of interest.
    */
    attribute=(char *) NULL;
    if (~((size_t) count) >= (MagickPathExtent-1))
      attribute=(char *) AcquireQuantumMemory((size_t) count+MagickPathExtent,
        sizeof(*attribute));
    if (attribute != (char *) NULL)
      {
        (void) CopyMagickMemory(attribute,(char *) info,(size_t) count);
        attribute[count]='\0';
        info+=count;
        length-=MagickMin(count,(ssize_t) length);
        /*
          Ids 2000..2998 are clipping paths; everything else is stored
          as-is under the requested key.
        */
        if ((id <= 1999) || (id >= 2999))
          (void) SetImageProperty((Image *) image,key,(const char *)
            attribute,exception);
        else
          {
            char
              *path;
            if (LocaleCompare(format,"svg") == 0)
              path=TraceSVGClippath((unsigned char *) attribute,(size_t) count,
                image->columns,image->rows);
            else
              path=TracePSClippath((unsigned char *) attribute,(size_t) count);
            (void) SetImageProperty((Image *) image,key,(const char *) path,
              exception);
            path=DestroyString(path);
          }
        attribute=DestroyString(attribute);
        status=MagickTrue;
      }
  }
  if (resource != (char *) NULL)
    resource=DestroyString(resource);
  return(status);
}
static inline signed int ReadPropertySignedLong(const EndianType endian,
  const unsigned char *buffer)
{
  /*
    Decode 4 bytes at buffer as a signed 32-bit integer with the given
    byte order; the result is reinterpreted through a union.
  */
  union
  {
    unsigned int
      unsigned_value;
    signed int
      signed_value;
  } quantum;
  unsigned char
    b0,
    b1,
    b2,
    b3;
  if (endian == LSBEndian)
    {
      b0=buffer[3];
      b1=buffer[2];
      b2=buffer[1];
      b3=buffer[0];
    }
  else
    {
      b0=buffer[0];
      b1=buffer[1];
      b2=buffer[2];
      b3=buffer[3];
    }
  quantum.unsigned_value=(((unsigned int) b0 << 24) |
    ((unsigned int) b1 << 16) | ((unsigned int) b2 << 8) |
    (unsigned int) b3) & 0xffffffff;
  return(quantum.signed_value);
}
static inline unsigned int ReadPropertyUnsignedLong(const EndianType endian,
  const unsigned char *buffer)
{
  /*
    Decode 4 bytes at buffer as an unsigned 32-bit integer with the given
    byte order.
  */
  unsigned char
    b0,
    b1,
    b2,
    b3;
  if (endian == LSBEndian)
    {
      b0=buffer[3];
      b1=buffer[2];
      b2=buffer[1];
      b3=buffer[0];
    }
  else
    {
      b0=buffer[0];
      b1=buffer[1];
      b2=buffer[2];
      b3=buffer[3];
    }
  return((((unsigned int) b0 << 24) | ((unsigned int) b1 << 16) |
    ((unsigned int) b2 << 8) | (unsigned int) b3) & 0xffffffff);
}
static inline signed short ReadPropertySignedShort(const EndianType endian,
  const unsigned char *buffer)
{
  /*
    Decode 2 bytes at buffer as a signed 16-bit integer with the given
    byte order; the result is reinterpreted through a union.
  */
  union
  {
    unsigned short
      unsigned_value;
    signed short
      signed_value;
  } quantum;
  unsigned char
    hi,
    lo;
  if (endian == LSBEndian)
    {
      hi=buffer[1];
      lo=buffer[0];
    }
  else
    {
      hi=buffer[0];
      lo=buffer[1];
    }
  quantum.unsigned_value=(unsigned short) ((((unsigned int) hi << 8) |
    (unsigned int) lo) & 0xffff);
  return(quantum.signed_value);
}
static inline unsigned short ReadPropertyUnsignedShort(const EndianType endian,
  const unsigned char *buffer)
{
  /*
    Decode 2 bytes at buffer as an unsigned 16-bit integer with the given
    byte order.
  */
  unsigned char
    hi,
    lo;
  if (endian == LSBEndian)
    {
      hi=buffer[1];
      lo=buffer[0];
    }
  else
    {
      hi=buffer[0];
      lo=buffer[1];
    }
  return((unsigned short) ((((unsigned int) hi << 8) | (unsigned int) lo) &
    0xffff));
}
static MagickBooleanType GetEXIFProperty(const Image *image,
const char *property,ExceptionInfo *exception)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define EXIF_FMT_BYTE 1
#define EXIF_FMT_STRING 2
#define EXIF_FMT_USHORT 3
#define EXIF_FMT_ULONG 4
#define EXIF_FMT_URATIONAL 5
#define EXIF_FMT_SBYTE 6
#define EXIF_FMT_UNDEFINED 7
#define EXIF_FMT_SSHORT 8
#define EXIF_FMT_SLONG 9
#define EXIF_FMT_SRATIONAL 10
#define EXIF_FMT_SINGLE 11
#define EXIF_FMT_DOUBLE 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_GPS_OFFSET 0x8825
#define TAG_INTEROP_OFFSET 0xa005
#define EXIFMultipleValues(size,format,arg) \
{ \
ssize_t \
component; \
\
size_t \
length; \
\
unsigned char \
*p1; \
\
length=0; \
p1=p; \
for (component=0; component < components; component++) \
{ \
length+=FormatLocaleString(buffer+length,MagickPathExtent-length, \
format", ",arg); \
if (length >= (MagickPathExtent-1)) \
length=MagickPathExtent-1; \
p1+=size; \
} \
if (length > 1) \
buffer[length-2]='\0'; \
value=AcquireString(buffer); \
}
#define EXIFMultipleFractions(size,format,arg1,arg2) \
{ \
ssize_t \
component; \
\
size_t \
length; \
\
unsigned char \
*p1; \
\
length=0; \
p1=p; \
for (component=0; component < components; component++) \
{ \
length+=FormatLocaleString(buffer+length,MagickPathExtent-length, \
format", ",(arg1),(arg2)); \
if (length >= (MagickPathExtent-1)) \
length=MagickPathExtent-1; \
p1+=size; \
} \
if (length > 1) \
buffer[length-2]='\0'; \
value=AcquireString(buffer); \
}
typedef struct _DirectoryInfo
{
const unsigned char
*directory;
size_t
entry;
ssize_t
offset;
} DirectoryInfo;
typedef struct _TagInfo
{
size_t
tag;
const char
*description;
} TagInfo;
static TagInfo
EXIFTag[] =
{
{ 0x001, "exif:InteroperabilityIndex" },
{ 0x002, "exif:InteroperabilityVersion" },
{ 0x100, "exif:ImageWidth" },
{ 0x101, "exif:ImageLength" },
{ 0x102, "exif:BitsPerSample" },
{ 0x103, "exif:Compression" },
{ 0x106, "exif:PhotometricInterpretation" },
{ 0x10a, "exif:FillOrder" },
{ 0x10d, "exif:DocumentName" },
{ 0x10e, "exif:ImageDescription" },
{ 0x10f, "exif:Make" },
{ 0x110, "exif:Model" },
{ 0x111, "exif:StripOffsets" },
{ 0x112, "exif:Orientation" },
{ 0x115, "exif:SamplesPerPixel" },
{ 0x116, "exif:RowsPerStrip" },
{ 0x117, "exif:StripByteCounts" },
{ 0x11a, "exif:XResolution" },
{ 0x11b, "exif:YResolution" },
{ 0x11c, "exif:PlanarConfiguration" },
{ 0x11d, "exif:PageName" },
{ 0x11e, "exif:XPosition" },
{ 0x11f, "exif:YPosition" },
{ 0x118, "exif:MinSampleValue" },
{ 0x119, "exif:MaxSampleValue" },
{ 0x120, "exif:FreeOffsets" },
{ 0x121, "exif:FreeByteCounts" },
{ 0x122, "exif:GrayResponseUnit" },
{ 0x123, "exif:GrayResponseCurve" },
{ 0x124, "exif:T4Options" },
{ 0x125, "exif:T6Options" },
{ 0x128, "exif:ResolutionUnit" },
{ 0x12d, "exif:TransferFunction" },
{ 0x131, "exif:Software" },
{ 0x132, "exif:DateTime" },
{ 0x13b, "exif:Artist" },
{ 0x13e, "exif:WhitePoint" },
{ 0x13f, "exif:PrimaryChromaticities" },
{ 0x140, "exif:ColorMap" },
{ 0x141, "exif:HalfToneHints" },
{ 0x142, "exif:TileWidth" },
{ 0x143, "exif:TileLength" },
{ 0x144, "exif:TileOffsets" },
{ 0x145, "exif:TileByteCounts" },
{ 0x14a, "exif:SubIFD" },
{ 0x14c, "exif:InkSet" },
{ 0x14d, "exif:InkNames" },
{ 0x14e, "exif:NumberOfInks" },
{ 0x150, "exif:DotRange" },
{ 0x151, "exif:TargetPrinter" },
{ 0x152, "exif:ExtraSample" },
{ 0x153, "exif:SampleFormat" },
{ 0x154, "exif:SMinSampleValue" },
{ 0x155, "exif:SMaxSampleValue" },
{ 0x156, "exif:TransferRange" },
{ 0x157, "exif:ClipPath" },
{ 0x158, "exif:XClipPathUnits" },
{ 0x159, "exif:YClipPathUnits" },
{ 0x15a, "exif:Indexed" },
{ 0x15b, "exif:JPEGTables" },
{ 0x15f, "exif:OPIProxy" },
{ 0x200, "exif:JPEGProc" },
{ 0x201, "exif:JPEGInterchangeFormat" },
{ 0x202, "exif:JPEGInterchangeFormatLength" },
{ 0x203, "exif:JPEGRestartInterval" },
{ 0x205, "exif:JPEGLosslessPredictors" },
{ 0x206, "exif:JPEGPointTransforms" },
{ 0x207, "exif:JPEGQTables" },
{ 0x208, "exif:JPEGDCTables" },
{ 0x209, "exif:JPEGACTables" },
{ 0x211, "exif:YCbCrCoefficients" },
{ 0x212, "exif:YCbCrSubSampling" },
{ 0x213, "exif:YCbCrPositioning" },
{ 0x214, "exif:ReferenceBlackWhite" },
{ 0x2bc, "exif:ExtensibleMetadataPlatform" },
{ 0x301, "exif:Gamma" },
{ 0x302, "exif:ICCProfileDescriptor" },
{ 0x303, "exif:SRGBRenderingIntent" },
{ 0x320, "exif:ImageTitle" },
{ 0x5001, "exif:ResolutionXUnit" },
{ 0x5002, "exif:ResolutionYUnit" },
{ 0x5003, "exif:ResolutionXLengthUnit" },
{ 0x5004, "exif:ResolutionYLengthUnit" },
{ 0x5005, "exif:PrintFlags" },
{ 0x5006, "exif:PrintFlagsVersion" },
{ 0x5007, "exif:PrintFlagsCrop" },
{ 0x5008, "exif:PrintFlagsBleedWidth" },
{ 0x5009, "exif:PrintFlagsBleedWidthScale" },
{ 0x500A, "exif:HalftoneLPI" },
{ 0x500B, "exif:HalftoneLPIUnit" },
{ 0x500C, "exif:HalftoneDegree" },
{ 0x500D, "exif:HalftoneShape" },
{ 0x500E, "exif:HalftoneMisc" },
{ 0x500F, "exif:HalftoneScreen" },
{ 0x5010, "exif:JPEGQuality" },
{ 0x5011, "exif:GridSize" },
{ 0x5012, "exif:ThumbnailFormat" },
{ 0x5013, "exif:ThumbnailWidth" },
{ 0x5014, "exif:ThumbnailHeight" },
{ 0x5015, "exif:ThumbnailColorDepth" },
{ 0x5016, "exif:ThumbnailPlanes" },
{ 0x5017, "exif:ThumbnailRawBytes" },
{ 0x5018, "exif:ThumbnailSize" },
{ 0x5019, "exif:ThumbnailCompressedSize" },
{ 0x501a, "exif:ColorTransferFunction" },
{ 0x501b, "exif:ThumbnailData" },
{ 0x5020, "exif:ThumbnailImageWidth" },
{ 0x5021, "exif:ThumbnailImageHeight" },
{ 0x5022, "exif:ThumbnailBitsPerSample" },
{ 0x5023, "exif:ThumbnailCompression" },
{ 0x5024, "exif:ThumbnailPhotometricInterp" },
{ 0x5025, "exif:ThumbnailImageDescription" },
{ 0x5026, "exif:ThumbnailEquipMake" },
{ 0x5027, "exif:ThumbnailEquipModel" },
{ 0x5028, "exif:ThumbnailStripOffsets" },
{ 0x5029, "exif:ThumbnailOrientation" },
{ 0x502a, "exif:ThumbnailSamplesPerPixel" },
{ 0x502b, "exif:ThumbnailRowsPerStrip" },
{ 0x502c, "exif:ThumbnailStripBytesCount" },
{ 0x502d, "exif:ThumbnailResolutionX" },
{ 0x502e, "exif:ThumbnailResolutionY" },
{ 0x502f, "exif:ThumbnailPlanarConfig" },
{ 0x5030, "exif:ThumbnailResolutionUnit" },
{ 0x5031, "exif:ThumbnailTransferFunction" },
{ 0x5032, "exif:ThumbnailSoftwareUsed" },
{ 0x5033, "exif:ThumbnailDateTime" },
{ 0x5034, "exif:ThumbnailArtist" },
{ 0x5035, "exif:ThumbnailWhitePoint" },
{ 0x5036, "exif:ThumbnailPrimaryChromaticities" },
{ 0x5037, "exif:ThumbnailYCbCrCoefficients" },
{ 0x5038, "exif:ThumbnailYCbCrSubsampling" },
{ 0x5039, "exif:ThumbnailYCbCrPositioning" },
{ 0x503A, "exif:ThumbnailRefBlackWhite" },
{ 0x503B, "exif:ThumbnailCopyRight" },
{ 0x5090, "exif:LuminanceTable" },
{ 0x5091, "exif:ChrominanceTable" },
{ 0x5100, "exif:FrameDelay" },
{ 0x5101, "exif:LoopCount" },
{ 0x5110, "exif:PixelUnit" },
{ 0x5111, "exif:PixelPerUnitX" },
{ 0x5112, "exif:PixelPerUnitY" },
{ 0x5113, "exif:PaletteHistogram" },
{ 0x1000, "exif:RelatedImageFileFormat" },
{ 0x1001, "exif:RelatedImageLength" },
{ 0x1002, "exif:RelatedImageWidth" },
{ 0x800d, "exif:ImageID" },
{ 0x80e3, "exif:Matteing" },
{ 0x80e4, "exif:DataType" },
{ 0x80e5, "exif:ImageDepth" },
{ 0x80e6, "exif:TileDepth" },
{ 0x828d, "exif:CFARepeatPatternDim" },
{ 0x828e, "exif:CFAPattern2" },
{ 0x828f, "exif:BatteryLevel" },
{ 0x8298, "exif:Copyright" },
{ 0x829a, "exif:ExposureTime" },
{ 0x829d, "exif:FNumber" },
{ 0x83bb, "exif:IPTC/NAA" },
{ 0x84e3, "exif:IT8RasterPadding" },
{ 0x84e5, "exif:IT8ColorTable" },
{ 0x8649, "exif:ImageResourceInformation" },
{ 0x8769, "exif:ExifOffset" },
{ 0x8773, "exif:InterColorProfile" },
{ 0x8822, "exif:ExposureProgram" },
{ 0x8824, "exif:SpectralSensitivity" },
{ 0x8825, "exif:GPSInfo" },
{ 0x8827, "exif:ISOSpeedRatings" },
{ 0x8828, "exif:OECF" },
{ 0x8829, "exif:Interlace" },
{ 0x882a, "exif:TimeZoneOffset" },
{ 0x882b, "exif:SelfTimerMode" },
{ 0x9000, "exif:ExifVersion" },
{ 0x9003, "exif:DateTimeOriginal" },
{ 0x9004, "exif:DateTimeDigitized" },
{ 0x9101, "exif:ComponentsConfiguration" },
{ 0x9102, "exif:CompressedBitsPerPixel" },
{ 0x9201, "exif:ShutterSpeedValue" },
{ 0x9202, "exif:ApertureValue" },
{ 0x9203, "exif:BrightnessValue" },
{ 0x9204, "exif:ExposureBiasValue" },
{ 0x9205, "exif:MaxApertureValue" },
{ 0x9206, "exif:SubjectDistance" },
{ 0x9207, "exif:MeteringMode" },
{ 0x9208, "exif:LightSource" },
{ 0x9209, "exif:Flash" },
{ 0x920a, "exif:FocalLength" },
{ 0x920b, "exif:FlashEnergy" },
{ 0x920c, "exif:SpatialFrequencyResponse" },
{ 0x920d, "exif:Noise" },
{ 0x9211, "exif:ImageNumber" },
{ 0x9212, "exif:SecurityClassification" },
{ 0x9213, "exif:ImageHistory" },
{ 0x9214, "exif:SubjectArea" },
{ 0x9215, "exif:ExposureIndex" },
{ 0x9216, "exif:TIFF-EPStandardID" },
{ 0x927c, "exif:MakerNote" },
{ 0x9C9b, "exif:WinXP-Title" },
{ 0x9C9c, "exif:WinXP-Comments" },
{ 0x9C9d, "exif:WinXP-Author" },
{ 0x9C9e, "exif:WinXP-Keywords" },
{ 0x9C9f, "exif:WinXP-Subject" },
{ 0x9286, "exif:UserComment" },
{ 0x9290, "exif:SubSecTime" },
{ 0x9291, "exif:SubSecTimeOriginal" },
{ 0x9292, "exif:SubSecTimeDigitized" },
{ 0xa000, "exif:FlashPixVersion" },
{ 0xa001, "exif:ColorSpace" },
{ 0xa002, "exif:ExifImageWidth" },
{ 0xa003, "exif:ExifImageLength" },
{ 0xa004, "exif:RelatedSoundFile" },
{ 0xa005, "exif:InteroperabilityOffset" },
{ 0xa20b, "exif:FlashEnergy" },
{ 0xa20c, "exif:SpatialFrequencyResponse" },
{ 0xa20d, "exif:Noise" },
{ 0xa20e, "exif:FocalPlaneXResolution" },
{ 0xa20f, "exif:FocalPlaneYResolution" },
{ 0xa210, "exif:FocalPlaneResolutionUnit" },
{ 0xa214, "exif:SubjectLocation" },
{ 0xa215, "exif:ExposureIndex" },
{ 0xa216, "exif:TIFF/EPStandardID" },
{ 0xa217, "exif:SensingMethod" },
{ 0xa300, "exif:FileSource" },
{ 0xa301, "exif:SceneType" },
{ 0xa302, "exif:CFAPattern" },
{ 0xa401, "exif:CustomRendered" },
{ 0xa402, "exif:ExposureMode" },
{ 0xa403, "exif:WhiteBalance" },
{ 0xa404, "exif:DigitalZoomRatio" },
{ 0xa405, "exif:FocalLengthIn35mmFilm" },
{ 0xa406, "exif:SceneCaptureType" },
{ 0xa407, "exif:GainControl" },
{ 0xa408, "exif:Contrast" },
{ 0xa409, "exif:Saturation" },
{ 0xa40a, "exif:Sharpness" },
{ 0xa40b, "exif:DeviceSettingDescription" },
{ 0xa40c, "exif:SubjectDistanceRange" },
{ 0xa420, "exif:ImageUniqueID" },
{ 0xc4a5, "exif:PrintImageMatching" },
{ 0xa500, "exif:Gamma" },
{ 0xc640, "exif:CR2Slice" },
{ 0x10000, "exif:GPSVersionID" },
{ 0x10001, "exif:GPSLatitudeRef" },
{ 0x10002, "exif:GPSLatitude" },
{ 0x10003, "exif:GPSLongitudeRef" },
{ 0x10004, "exif:GPSLongitude" },
{ 0x10005, "exif:GPSAltitudeRef" },
{ 0x10006, "exif:GPSAltitude" },
{ 0x10007, "exif:GPSTimeStamp" },
{ 0x10008, "exif:GPSSatellites" },
{ 0x10009, "exif:GPSStatus" },
{ 0x1000a, "exif:GPSMeasureMode" },
{ 0x1000b, "exif:GPSDop" },
{ 0x1000c, "exif:GPSSpeedRef" },
{ 0x1000d, "exif:GPSSpeed" },
{ 0x1000e, "exif:GPSTrackRef" },
{ 0x1000f, "exif:GPSTrack" },
{ 0x10010, "exif:GPSImgDirectionRef" },
{ 0x10011, "exif:GPSImgDirection" },
{ 0x10012, "exif:GPSMapDatum" },
{ 0x10013, "exif:GPSDestLatitudeRef" },
{ 0x10014, "exif:GPSDestLatitude" },
{ 0x10015, "exif:GPSDestLongitudeRef" },
{ 0x10016, "exif:GPSDestLongitude" },
{ 0x10017, "exif:GPSDestBearingRef" },
{ 0x10018, "exif:GPSDestBearing" },
{ 0x10019, "exif:GPSDestDistanceRef" },
{ 0x1001a, "exif:GPSDestDistance" },
{ 0x1001b, "exif:GPSProcessingMethod" },
{ 0x1001c, "exif:GPSAreaInformation" },
{ 0x1001d, "exif:GPSDateStamp" },
{ 0x1001e, "exif:GPSDifferential" },
{ 0x00000, (const char *) NULL }
};
const StringInfo
*profile;
const unsigned char
*directory,
*exif;
DirectoryInfo
directory_stack[MaxDirectoryStack];
EndianType
endian;
MagickBooleanType
status;
register ssize_t
i;
size_t
entry,
length,
number_entries,
tag,
tag_value;
SplayTreeInfo
*exif_resources;
ssize_t
all,
id,
level,
offset,
tag_offset;
static int
tag_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};
/*
If EXIF data exists, then try to parse the request for a tag.
*/
profile=GetImageProfile(image,"exif");
if (profile == (const StringInfo *) NULL)
return(MagickFalse);
if ((property == (const char *) NULL) || (*property == '\0'))
return(MagickFalse);
while (isspace((int) ((unsigned char) *property)) != 0)
property++;
if (strlen(property) <= 5)
return(MagickFalse);
all=0;
tag=(~0UL);
switch (*(property+5))
{
case '*':
{
/*
Caller has asked for all the tags in the EXIF data.
*/
tag=0;
all=1; /* return the data in description=value format */
break;
}
case '!':
{
tag=0;
all=2; /* return the data in tagid=value format */
break;
}
case '#':
case '@':
{
int
c;
size_t
n;
/*
Check for a hex based tag specification first.
*/
tag=(*(property+5) == '@') ? 1UL : 0UL;
property+=6;
n=strlen(property);
if (n != 4)
return(MagickFalse);
/*
Parse tag specification as a hex number.
*/
n/=4;
do
{
for (i=(ssize_t) n-1L; i >= 0; i--)
{
c=(*property++);
tag<<=4;
if ((c >= '0') && (c <= '9'))
tag|=(c-'0');
else
if ((c >= 'A') && (c <= 'F'))
tag|=(c-('A'-10));
else
if ((c >= 'a') && (c <= 'f'))
tag|=(c-('a'-10));
else
return(MagickFalse);
}
} while (*property != '\0');
break;
}
default:
{
/*
Try to match the text with a tag name instead.
*/
for (i=0; ; i++)
{
if (EXIFTag[i].tag == 0)
break;
if (LocaleCompare(EXIFTag[i].description,property) == 0)
{
tag=(size_t) EXIFTag[i].tag;
break;
}
}
break;
}
}
if (tag == (~0UL))
return(MagickFalse);
length=GetStringInfoLength(profile);
exif=GetStringInfoDatum(profile);
while (length != 0)
{
if (ReadPropertyByte(&exif,&length) != 0x45)
continue;
if (ReadPropertyByte(&exif,&length) != 0x78)
continue;
if (ReadPropertyByte(&exif,&length) != 0x69)
continue;
if (ReadPropertyByte(&exif,&length) != 0x66)
continue;
if (ReadPropertyByte(&exif,&length) != 0x00)
continue;
if (ReadPropertyByte(&exif,&length) != 0x00)
continue;
break;
}
if (length < 16)
return(MagickFalse);
id=(ssize_t) ReadPropertySignedShort(LSBEndian,exif);
endian=LSBEndian;
if (id == 0x4949)
endian=LSBEndian;
else
if (id == 0x4D4D)
endian=MSBEndian;
else
return(MagickFalse);
if (ReadPropertyUnsignedShort(endian,exif+2) != 0x002a)
return(MagickFalse);
/*
This the offset to the first IFD.
*/
offset=(ssize_t) ReadPropertySignedLong(endian,exif+4);
if ((offset < 0) || (size_t) offset >= length)
return(MagickFalse);
/*
Set the pointer to the first IFD and follow it were it leads.
*/
status=MagickFalse;
directory=exif+offset;
level=0;
entry=0;
tag_offset=0;
exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
(void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
do
{
/*
If there is anything on the stack then pop it off.
*/
if (level > 0)
{
level--;
directory=directory_stack[level].directory;
entry=directory_stack[level].entry;
tag_offset=directory_stack[level].offset;
}
if ((directory < exif) || (directory > (exif+length-2)))
break;
/*
Determine how many entries there are in the current IFD.
*/
number_entries=(size_t) ReadPropertyUnsignedShort(endian,directory);
for ( ; entry < number_entries; entry++)
{
register unsigned char
*p,
*q;
size_t
format;
ssize_t
number_bytes,
components;
q=(unsigned char *) (directory+(12*entry)+2);
if (q > (exif+length-12))
break; /* corrupt EXIF */
if (GetValueFromSplayTree(exif_resources,q) == q)
break;
(void) AddValueToSplayTree(exif_resources,q,q);
tag_value=(size_t) ReadPropertyUnsignedShort(endian,q)+tag_offset;
format=(size_t) ReadPropertyUnsignedShort(endian,q+2);
if (format >= (sizeof(tag_bytes)/sizeof(*tag_bytes)))
break;
components=(ssize_t) ReadPropertySignedLong(endian,q+4);
if (components < 0)
break; /* corrupt EXIF */
number_bytes=(size_t) components*tag_bytes[format];
if (number_bytes < components)
break; /* prevent overflow */
if (number_bytes <= 4)
p=q+8;
else
{
ssize_t
offset;
/*
The directory entry contains an offset.
*/
offset=(ssize_t) ReadPropertySignedLong(endian,q+8);
if ((offset < 0) || (size_t) offset >= length)
continue;
if ((ssize_t) (offset+number_bytes) < offset)
continue; /* prevent overflow */
if ((size_t) (offset+number_bytes) > length)
continue;
p=(unsigned char *) (exif+offset);
}
if ((all != 0) || (tag == (size_t) tag_value))
{
char
buffer[MagickPathExtent],
*value;
value=(char *) NULL;
*buffer='\0';
switch (format)
{
case EXIF_FMT_BYTE:
case EXIF_FMT_UNDEFINED:
{
EXIFMultipleValues(1,"%.20g",(double) (*(unsigned char *) p1));
break;
}
case EXIF_FMT_SBYTE:
{
EXIFMultipleValues(1,"%.20g",(double) (*(signed char *) p1));
break;
}
case EXIF_FMT_SSHORT:
{
EXIFMultipleValues(2,"%hd",ReadPropertySignedShort(endian,p1));
break;
}
case EXIF_FMT_USHORT:
{
EXIFMultipleValues(2,"%hu",ReadPropertyUnsignedShort(endian,p1));
break;
}
case EXIF_FMT_ULONG:
{
EXIFMultipleValues(4,"%.20g",(double)
ReadPropertyUnsignedLong(endian,p1));
break;
}
case EXIF_FMT_SLONG:
{
EXIFMultipleValues(4,"%.20g",(double)
ReadPropertySignedLong(endian,p1));
break;
}
case EXIF_FMT_URATIONAL:
{
EXIFMultipleFractions(8,"%.20g/%.20g",(double)
ReadPropertyUnsignedLong(endian,p1),(double)
ReadPropertyUnsignedLong(endian,p1+4));
break;
}
case EXIF_FMT_SRATIONAL:
{
EXIFMultipleFractions(8,"%.20g/%.20g",(double)
ReadPropertySignedLong(endian,p1),(double)
ReadPropertySignedLong(endian,p1+4));
break;
}
case EXIF_FMT_SINGLE:
{
EXIFMultipleValues(4,"%f",(double) *(float *) p1);
break;
}
case EXIF_FMT_DOUBLE:
{
EXIFMultipleValues(8,"%f",*(double *) p1);
break;
}
default:
case EXIF_FMT_STRING:
{
value=(char *) NULL;
if (~((size_t) number_bytes) >= 1)
value=(char *) AcquireQuantumMemory((size_t) number_bytes+1UL,
sizeof(*value));
if (value != (char *) NULL)
{
register ssize_t
i;
for (i=0; i < (ssize_t) number_bytes; i++)
{
value[i]='.';
if ((isprint((int) p[i]) != 0) || (p[i] == '\0'))
value[i]=(char) p[i];
}
value[i]='\0';
}
break;
}
}
if (value != (char *) NULL)
{
char
*key;
register const char
*p;
key=AcquireString(property);
switch (all)
{
case 1:
{
const char
*description;
register ssize_t
i;
description="unknown";
for (i=0; ; i++)
{
if (EXIFTag[i].tag == 0)
break;
if (EXIFTag[i].tag == tag_value)
{
description=EXIFTag[i].description;
break;
}
}
(void) FormatLocaleString(key,MagickPathExtent,"%s",
description);
if (level == 2)
(void) SubstituteString(&key,"exif:","exif:thumbnail:");
break;
}
case 2:
{
if (tag_value < 0x10000)
(void) FormatLocaleString(key,MagickPathExtent,"#%04lx",
(unsigned long) tag_value);
else
if (tag_value < 0x20000)
(void) FormatLocaleString(key,MagickPathExtent,"@%04lx",
(unsigned long) (tag_value & 0xffff));
else
(void) FormatLocaleString(key,MagickPathExtent,"unknown");
break;
}
default:
{
if (level == 2)
(void) SubstituteString(&key,"exif:","exif:thumbnail:");
}
}
p=(const char *) NULL;
if (image->properties != (void *) NULL)
p=(const char *) GetValueFromSplayTree((SplayTreeInfo *)
image->properties,key);
if (p == (const char *) NULL)
(void) SetImageProperty((Image *) image,key,value,exception);
value=DestroyString(value);
key=DestroyString(key);
status=MagickTrue;
}
}
if ((tag_value == TAG_EXIF_OFFSET) ||
(tag_value == TAG_INTEROP_OFFSET) || (tag_value == TAG_GPS_OFFSET))
{
ssize_t
offset;
offset=(ssize_t) ReadPropertySignedLong(endian,p);
if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
{
ssize_t
tag_offset1;
tag_offset1=(ssize_t) ((tag_value == TAG_GPS_OFFSET) ? 0x10000 :
0);
directory_stack[level].directory=directory;
entry++;
directory_stack[level].entry=entry;
directory_stack[level].offset=tag_offset;
level++;
directory_stack[level].directory=exif+offset;
directory_stack[level].offset=tag_offset1;
directory_stack[level].entry=0;
level++;
if ((directory+2+(12*number_entries)) > (exif+length))
break;
offset=(ssize_t) ReadPropertySignedLong(endian,directory+2+(12*
number_entries));
if ((offset != 0) && ((size_t) offset < length) &&
(level < (MaxDirectoryStack-2)))
{
directory_stack[level].directory=exif+offset;
directory_stack[level].entry=0;
directory_stack[level].offset=tag_offset1;
level++;
}
}
break;
}
}
} while (level > 0);
exif_resources=DestroySplayTree(exif_resources);
return(status);
}
/*
  GetICCProperty() publishes text fields of the image's ICC (or legacy ICM)
  color profile as "icc:*" image properties.  With the LCMS delegate the
  description, manufacturer, model and copyright strings are extracted and
  stored via SetImageProperty(); without it the profile is only checked for
  plausibility.  Returns MagickFalse when no profile is attached or the
  profile is shorter than the minimum ICC header, MagickTrue otherwise.
*/
static MagickBooleanType GetICCProperty(const Image *image,const char *property,
  ExceptionInfo *exception)
{
  const StringInfo
    *profile;

  magick_unreferenced(property);  /* whole profile is parsed; key is unused */

  profile=GetImageProfile(image,"icc");
  if (profile == (StringInfo *) NULL)
    profile=GetImageProfile(image,"icm");  /* legacy profile name */
  if (profile == (StringInfo *) NULL)
    return(MagickFalse);
  if (GetStringInfoLength(profile) < 128)
    return(MagickFalse); /* minimum ICC profile length */
#if defined(MAGICKCORE_LCMS_DELEGATE)
  {
    cmsHPROFILE
      icc_profile;

    icc_profile=cmsOpenProfileFromMem(GetStringInfoDatum(profile),
      (cmsUInt32Number) GetStringInfoLength(profile));
    if (icc_profile != (cmsHPROFILE *) NULL)
      {
#if defined(LCMS_VERSION) && (LCMS_VERSION < 2000)
        /* LCMS 1.x exposes only a product name. */
        const char
          *name;

        name=cmsTakeProductName(icc_profile);
        if (name != (const char *) NULL)
          (void) SetImageProperty((Image *) image,"icc:name",name,exception);
#else
        char
          info[MagickPathExtent];

        (void) cmsGetProfileInfoASCII(icc_profile,cmsInfoDescription,"en","US",
          info,MagickPathExtent);
        (void) SetImageProperty((Image *) image,"icc:description",info,
          exception);
        (void) cmsGetProfileInfoASCII(icc_profile,cmsInfoManufacturer,"en","US",
          info,MagickPathExtent);
        (void) SetImageProperty((Image *) image,"icc:manufacturer",info,
          exception);
        (void) cmsGetProfileInfoASCII(icc_profile,cmsInfoModel,"en","US",info,
          MagickPathExtent);
        (void) SetImageProperty((Image *) image,"icc:model",info,exception);
        (void) cmsGetProfileInfoASCII(icc_profile,cmsInfoCopyright,"en","US",
          info,MagickPathExtent);
        (void) SetImageProperty((Image *) image,"icc:copyright",info,exception);
#endif
        (void) cmsCloseProfile(icc_profile);
      }
  }
#endif
  return(MagickTrue);
}
/*
  SkipXMPValue() reports whether an XMP content string should be ignored:
  it returns MagickTrue for a NULL value or one consisting entirely of
  whitespace, MagickFalse as soon as any non-whitespace byte is found.
*/
static MagickBooleanType SkipXMPValue(const char *value)
{
  if (value == (const char *) NULL)
    return(MagickTrue);
  for ( ; *value != '\0'; value++)
    if (isspace((int) ((unsigned char) *value)) == 0)
      return(MagickFalse);
  return(MagickTrue);
}
/*
  GetXMPProperty() parses the image's XMP profile (an RDF/XML packet) and
  registers each non-empty leaf value found under rdf:Description nodes as
  an image property keyed by its XML tag.

  Fix: 'status' was initialized to MagickFalse and never updated, so the
  function always reported failure even after storing properties.  It is
  now set to MagickTrue whenever a value is added to the property tree.

  Returns MagickTrue when at least one property was extracted; MagickFalse
  when there is no XMP profile, no usable property key, the profile cannot
  be converted, the XML cannot be parsed, or nothing was stored.
*/
static MagickBooleanType GetXMPProperty(const Image *image,const char *property)
{
  char
    *xmp_profile;

  const char
    *content;

  const StringInfo
    *profile;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  register const char
    *p;

  XMLTreeInfo
    *child,
    *description,
    *node,
    *rdf,
    *xmp;

  profile=GetImageProfile(image,"xmp");
  if (profile == (StringInfo *) NULL)
    return(MagickFalse);
  if ((property == (const char *) NULL) || (*property == '\0'))
    return(MagickFalse);
  xmp_profile=StringInfoToString(profile);
  if (xmp_profile == (char *) NULL)
    return(MagickFalse);
  /*
    Skip any leading packet-wrapper bytes up to the first "<x".
  */
  for (p=xmp_profile; *p != '\0'; p++)
    if ((*p == '<') && (*(p+1) == 'x'))
      break;
  exception=AcquireExceptionInfo();
  xmp=NewXMLTree((char *) p,exception);
  xmp_profile=DestroyString(xmp_profile);
  exception=DestroyExceptionInfo(exception);
  if (xmp == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  status=MagickFalse;
  rdf=GetXMLTreeChild(xmp,"rdf:RDF");
  if (rdf != (XMLTreeInfo *) NULL)
    {
      if (image->properties == (void *) NULL)
        ((Image *) image)->properties=NewSplayTree(CompareSplayTreeString,
          RelinquishMagickMemory,RelinquishMagickMemory);
      description=GetXMLTreeChild(rdf,"rdf:Description");
      while (description != (XMLTreeInfo *) NULL)
      {
        node=GetXMLTreeChild(description,(const char *) NULL);
        while (node != (XMLTreeInfo *) NULL)
        {
          child=GetXMLTreeChild(node,(const char *) NULL);
          content=GetXMLTreeContent(node);
          if ((child == (XMLTreeInfo *) NULL) &&
              (SkipXMPValue(content) == MagickFalse))
            {
              (void) AddValueToSplayTree((SplayTreeInfo *) image->properties,
                ConstantString(GetXMLTreeTag(node)),ConstantString(content));
              status=MagickTrue;  /* at least one property extracted */
            }
          while (child != (XMLTreeInfo *) NULL)
          {
            content=GetXMLTreeContent(child);
            if (SkipXMPValue(content) == MagickFalse)
              {
                (void) AddValueToSplayTree((SplayTreeInfo *) image->properties,
                  ConstantString(GetXMLTreeTag(child)),ConstantString(content));
                status=MagickTrue;
              }
            child=GetXMLTreeSibling(child);
          }
          node=GetXMLTreeSibling(node);
        }
        description=GetNextXMLTreeTag(description);
      }
    }
  xmp=DestroyXMLTree(xmp);
  return(status);
}
/*
  TracePSClippath() converts a Photoshop clipping-path resource into a
  PostScript procedure named /ClipImage that traces the path and applies it
  with 'eoclip'.  Returns a newly allocated string (caller frees) or NULL
  on allocation failure.  Each record in 'blob' is a 2-byte selector
  followed by 24 bytes of payload; selectors 0/3 open a subpath (knot
  count), 1/2/4/5 carry Bezier knots, all others are skipped.
*/
static char *TracePSClippath(const unsigned char *blob,size_t length)
{
  char
    *path,
    *message;

  MagickBooleanType
    in_subpath;

  PointInfo
    first[3],  /* knot that opened the current subpath */
    last[3],   /* most recently emitted knot */
    point[3];  /* knot being decoded (control/anchor/control per the spec) */

  register ssize_t
    i,
    x;

  ssize_t
    knot_count,
    selector,
    y;

  path=AcquireString((char *) NULL);
  if (path == (char *) NULL)
    return((char *) NULL);
  message=AcquireString((char *) NULL);
  /*
    Emit the PostScript prolog: short aliases for the path operators.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"/ClipImage\n");
  (void) ConcatenateString(&path,message);
  (void) FormatLocaleString(message,MagickPathExtent,"{\n");
  (void) ConcatenateString(&path,message);
  (void) FormatLocaleString(message,MagickPathExtent,
    "  /c {curveto} bind def\n");
  (void) ConcatenateString(&path,message);
  (void) FormatLocaleString(message,MagickPathExtent,
    "  /l {lineto} bind def\n");
  (void) ConcatenateString(&path,message);
  (void) FormatLocaleString(message,MagickPathExtent,
    "  /m {moveto} bind def\n");
  (void) ConcatenateString(&path,message);
  (void) FormatLocaleString(message,MagickPathExtent,
    "  /v {currentpoint 6 2 roll curveto} bind def\n");
  (void) ConcatenateString(&path,message);
  (void) FormatLocaleString(message,MagickPathExtent,
    "  /y {2 copy curveto} bind def\n");
  (void) ConcatenateString(&path,message);
  (void) FormatLocaleString(message,MagickPathExtent,
    "  /z {closepath} bind def\n");
  (void) ConcatenateString(&path,message);
  (void) FormatLocaleString(message,MagickPathExtent,"  newpath\n");
  (void) ConcatenateString(&path,message);
  /*
    The clipping path format is defined in "Adobe Photoshop File Formats
    Specification" version 6.0 downloadable from adobe.com.
  */
  (void) ResetMagickMemory(point,0,sizeof(point));
  (void) ResetMagickMemory(first,0,sizeof(first));
  (void) ResetMagickMemory(last,0,sizeof(last));
  knot_count=0;
  in_subpath=MagickFalse;
  while (length > 0)
  {
    selector=(ssize_t) ReadPropertyMSBShort(&blob,&length);
    switch (selector)
    {
      case 0:
      case 3:
      {
        if (knot_count != 0)
          {
            /* Length record while knots remain: skip it. */
            blob+=24;
            length-=MagickMin(24,(ssize_t) length);  /* clamp: no underflow */
            break;
          }
        /*
          Expected subpath length record.
        */
        knot_count=(ssize_t) ReadPropertyMSBShort(&blob,&length);
        blob+=22;
        length-=MagickMin(22,(ssize_t) length);
        break;
      }
      case 1:
      case 2:
      case 4:
      case 5:
      {
        if (knot_count == 0)
          {
            /*
              Unexpected subpath knot
            */
            blob+=24;
            length-=MagickMin(24,(ssize_t) length);
            break;
          }
        /*
          Add sub-path knot
        */
        for (i=0; i < 3; i++)
        {
          size_t
            xx,
            yy;

          /*
            Coordinates are 32-bit signed fixed point with 24 fractional
            bits; reinterpret as signed, then scale by 4096*4096 (2^24).
          */
          yy=(size_t) ReadPropertyMSBLong(&blob,&length);
          xx=(size_t) ReadPropertyMSBLong(&blob,&length);
          x=(ssize_t) xx;
          if (xx > 2147483647)
            x=(ssize_t) xx-4294967295U-1;
          y=(ssize_t) yy;
          if (yy > 2147483647)
            y=(ssize_t) yy-4294967295U-1;
          point[i].x=(double) x/4096/4096;
          point[i].y=1.0-(double) y/4096/4096;  /* PS y-axis points up */
        }
        if (in_subpath == MagickFalse)
          {
            /* First knot of a subpath: move to the anchor point. */
            (void) FormatLocaleString(message,MagickPathExtent,"  %g %g m\n",
              point[1].x,point[1].y);
            for (i=0; i < 3; i++)
            {
              first[i]=point[i];
              last[i]=point[i];
            }
          }
        else
          {
            /*
              Handle special cases when Bezier curves are used to describe
              corners and straight lines.
            */
            if ((last[1].x == last[2].x) && (last[1].y == last[2].y) &&
                (point[0].x == point[1].x) && (point[0].y == point[1].y))
              (void) FormatLocaleString(message,MagickPathExtent,
                "  %g %g l\n",point[1].x,point[1].y);
            else
              if ((last[1].x == last[2].x) && (last[1].y == last[2].y))
                (void) FormatLocaleString(message,MagickPathExtent,
                  "  %g %g %g %g v\n",point[0].x,point[0].y,
                  point[1].x,point[1].y);
              else
                if ((point[0].x == point[1].x) && (point[0].y == point[1].y))
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "  %g %g %g %g y\n",last[2].x,last[2].y,
                    point[1].x,point[1].y);
                else
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "  %g %g %g %g %g %g c\n",last[2].x,
                    last[2].y,point[0].x,point[0].y,point[1].x,point[1].y);
            for (i=0; i < 3; i++)
              last[i]=point[i];
          }
        (void) ConcatenateString(&path,message);
        in_subpath=MagickTrue;
        knot_count--;
        /*
          Close the subpath if there are no more knots.
        */
        if (knot_count == 0)
          {
            /*
              Same special handling as above except we compare to the
              first point in the path and close the path.
            */
            if ((last[1].x == last[2].x) && (last[1].y == last[2].y) &&
                (first[0].x == first[1].x) && (first[0].y == first[1].y))
              (void) FormatLocaleString(message,MagickPathExtent,
                "  %g %g l z\n",first[1].x,first[1].y);
            else
              if ((last[1].x == last[2].x) && (last[1].y == last[2].y))
                (void) FormatLocaleString(message,MagickPathExtent,
                  "  %g %g %g %g v z\n",first[0].x,first[0].y,
                  first[1].x,first[1].y);
              else
                if ((first[0].x == first[1].x) && (first[0].y == first[1].y))
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "  %g %g %g %g y z\n",last[2].x,last[2].y,
                    first[1].x,first[1].y);
                else
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "  %g %g %g %g %g %g c z\n",last[2].x,
                    last[2].y,first[0].x,first[0].y,first[1].x,first[1].y);
            (void) ConcatenateString(&path,message);
            in_subpath=MagickFalse;
          }
        break;
      }
      case 6:
      case 7:
      case 8:
      default:
      {
        /* Unhandled selector: skip the 24-byte payload. */
        blob+=24;
        length-=MagickMin(24,(ssize_t) length);
        break;
      }
    }
  }
  /*
    Returns an empty PS path if the path has no knots.
  */
  (void) FormatLocaleString(message,MagickPathExtent,"  eoclip\n");
  (void) ConcatenateString(&path,message);
  (void) FormatLocaleString(message,MagickPathExtent,"} bind def");
  (void) ConcatenateString(&path,message);
  message=DestroyString(message);
  return(path);
}
/*
  TraceSVGClippath() converts a Photoshop clipping-path resource into a
  standalone SVG document containing a single <path> element scaled to the
  given image dimensions (columns x rows).  Returns a newly allocated
  string (caller frees) or NULL on allocation failure.  The record format
  mirrors TracePSClippath(): a 2-byte selector plus 24 bytes of payload.
*/
static char *TraceSVGClippath(const unsigned char *blob,size_t length,
  const size_t columns,const size_t rows)
{
  char
    *path,
    *message;

  MagickBooleanType
    in_subpath;

  PointInfo
    first[3],  /* knot that opened the current subpath */
    last[3],   /* most recently emitted knot */
    point[3];  /* knot being decoded */

  register ssize_t
    i;

  ssize_t
    knot_count,
    selector,
    x,
    y;

  path=AcquireString((char *) NULL);
  if (path == (char *) NULL)
    return((char *) NULL);
  message=AcquireString((char *) NULL);
  /*
    Emit the SVG document header; the path is invisible (fill/stroke
    fully transparent) since it is only used for clipping.
  */
  (void) FormatLocaleString(message,MagickPathExtent,(
    "<?xml version=\"1.0\" encoding=\"iso-8859-1\"?>\n"
    "<svg xmlns=\"http://www.w3.org/2000/svg\""
    " width=\"%.20g\" height=\"%.20g\">\n"
    "<g>\n"
    "<path fill-rule=\"evenodd\" style=\"fill:#00000000;stroke:#00000000;"
    "stroke-width:0;stroke-antialiasing:false\" d=\"\n"),(double) columns,
    (double) rows);
  (void) ConcatenateString(&path,message);
  (void) ResetMagickMemory(point,0,sizeof(point));
  (void) ResetMagickMemory(first,0,sizeof(first));
  (void) ResetMagickMemory(last,0,sizeof(last));
  knot_count=0;
  in_subpath=MagickFalse;
  while (length != 0)
  {
    selector=(ssize_t) ReadPropertyMSBShort(&blob,&length);
    switch (selector)
    {
      case 0:
      case 3:
      {
        if (knot_count != 0)
          {
            /* Length record while knots remain: skip it. */
            blob+=24;
            length-=MagickMin(24,(ssize_t) length);  /* clamp: no underflow */
            break;
          }
        /*
          Expected subpath length record.
        */
        knot_count=(ssize_t) ReadPropertyMSBShort(&blob,&length);
        blob+=22;
        length-=MagickMin(22,(ssize_t) length);
        break;
      }
      case 1:
      case 2:
      case 4:
      case 5:
      {
        if (knot_count == 0)
          {
            /*
              Unexpected subpath knot.
            */
            blob+=24;
            length-=MagickMin(24,(ssize_t) length);
            break;
          }
        /*
          Add sub-path knot
        */
        for (i=0; i < 3; i++)
        {
          unsigned int
            xx,
            yy;

          /*
            32-bit signed fixed point with 24 fractional bits; reinterpret
            as signed, then scale by 4096*4096 (2^24) and by image size.
          */
          yy=(unsigned int) ReadPropertyMSBLong(&blob,&length);
          xx=(unsigned int) ReadPropertyMSBLong(&blob,&length);
          x=(ssize_t) xx;
          if (xx > 2147483647)
            x=(ssize_t) xx-4294967295U-1;
          y=(ssize_t) yy;
          if (yy > 2147483647)
            y=(ssize_t) yy-4294967295U-1;
          point[i].x=(double) x*columns/4096/4096;
          point[i].y=(double) y*rows/4096/4096;
        }
        if (in_subpath == MagickFalse)
          {
            /* First knot of a subpath: move to the anchor point. */
            (void) FormatLocaleString(message,MagickPathExtent,"M %g %g\n",
              point[1].x,point[1].y);
            for (i=0; i < 3; i++)
            {
              first[i]=point[i];
              last[i]=point[i];
            }
          }
        else
          {
            /*
              Handle special cases when Bezier curves are used to describe
              corners and straight lines.
            */
            if ((last[1].x == last[2].x) && (last[1].y == last[2].y) &&
                (point[0].x == point[1].x) && (point[0].y == point[1].y))
              (void) FormatLocaleString(message,MagickPathExtent,
                "L %g %g\n",point[1].x,point[1].y);
            else
              (void) FormatLocaleString(message,MagickPathExtent,
                "C %g %g %g %g %g %g\n",last[2].x,
                last[2].y,point[0].x,point[0].y,point[1].x,point[1].y);
            for (i=0; i < 3; i++)
              last[i]=point[i];
          }
        (void) ConcatenateString(&path,message);
        in_subpath=MagickTrue;
        knot_count--;
        /*
          Close the subpath if there are no more knots.
        */
        if (knot_count == 0)
          {
            /*
              Same special handling as above except we compare to the
              first point in the path and close the path.
            */
            if ((last[1].x == last[2].x) && (last[1].y == last[2].y) &&
                (first[0].x == first[1].x) && (first[0].y == first[1].y))
              (void) FormatLocaleString(message,MagickPathExtent,
                "L %g %g Z\n",first[1].x,first[1].y);
            else
              (void) FormatLocaleString(message,MagickPathExtent,
                "C %g %g %g %g %g %g Z\n",last[2].x,
                last[2].y,first[0].x,first[0].y,first[1].x,first[1].y);
            (void) ConcatenateString(&path,message);
            in_subpath=MagickFalse;
          }
        break;
      }
      case 6:
      case 7:
      case 8:
      default:
      {
        /* Unhandled selector: skip the 24-byte payload. */
        blob+=24;
        length-=MagickMin(24,(ssize_t) length);
        break;
      }
    }
  }
  /*
    Return an empty SVG image if the path does not have knots.
  */
  (void) ConcatenateString(&path,"\"/>\n</g>\n</svg>\n");
  message=DestroyString(message);
  return(path);
}
/*
  GetImageProperty() returns the value of the named image property.  A NULL
  property returns the first value in the property tree (after resetting
  its iterator).  For namespaced keys ("8bim:", "exif:", "icc:"/"icm:",
  "iptc:", "xmp:") that are not yet cached, the backing profile is decoded
  on demand and the tree consulted again.  The returned string is owned by
  the image and must not be freed by the caller.
*/
MagickExport const char *GetImageProperty(const Image *image,
  const char *property,ExceptionInfo *exception)
{
  const char
    *value;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  value=(const char *) NULL;
  if (image->properties != (void *) NULL)
    {
      if (property == (const char *) NULL)
        {
          /* No key: hand back the first property value. */
          ResetSplayTreeIterator((SplayTreeInfo *) image->properties);
          value=(const char *) GetNextValueInSplayTree((SplayTreeInfo *)
            image->properties);
          return(value);
        }
      value=(const char *) GetValueFromSplayTree((SplayTreeInfo *)
        image->properties,property);
      if (value != (const char *) NULL)
        return(value);
    }
  if ((property == (const char *) NULL) ||
      (strchr(property,':') == (char *) NULL))
    return(value);
  /*
    Cache miss on a namespaced key: decode the profile that backs it.
  */
  if (LocaleNCompare("8bim:",property,5) == 0)
    (void) Get8BIMProperty(image,property,exception);
  else
    if (LocaleNCompare("exif:",property,5) == 0)
      (void) GetEXIFProperty(image,property,exception);
    else
      if ((LocaleNCompare("icc:",property,4) == 0) ||
          (LocaleNCompare("icm:",property,4) == 0))
        (void) GetICCProperty(image,property,exception);
      else
        if (LocaleNCompare("iptc:",property,5) == 0)
          (void) GetIPTCProperty(image,property,exception);
        else
          if (LocaleNCompare("xmp:",property,4) == 0)
            (void) GetXMPProperty(image,property);
  if (image->properties != (void *) NULL)
    {
      value=(const char *) GetValueFromSplayTree((SplayTreeInfo *)
        image->properties,property);
      return(value);
    }
  return((const char *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t M a g i c k P r o p e r t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetMagickProperty() gets attributes or calculated values that is associated
% with a fixed known property name, or single letter property. It may be
% called if no image is defined (IMv7), in which case only global image_info
% values are available:
%
% \n newline
% \r carriage return
% < less-than character.
% > greater-than character.
% & ampersand character.
% %% a percent sign
% %b file size of image read in
% %c comment meta-data property
% %d directory component of path
% %e filename extension or suffix
% %f filename (including suffix)
% %g layer canvas page geometry (equivalent to "%Wx%H%X%Y")
% %h current image height in pixels
% %i image filename (note: becomes output filename for "info:")
% %k CALCULATED: number of unique colors
% %l label meta-data property
% %m image file format (file magic)
% %n number of images in current image sequence
% %o output filename (used for delegates)
% %p index of image in current image list
% %q quantum depth (compile-time constant)
% %r image class and colorspace
% %s scene number (from input unless re-assigned)
% %t filename without directory or extension (suffix)
% %u unique temporary filename (used for delegates)
% %w current width in pixels
% %x x resolution (density)
% %y y resolution (density)
% %z image depth (as read in unless modified, image save depth)
% %A image transparency channel enabled (true/false)
% %C image compression type
% %D image GIF dispose method
% %G original image size (%wx%h; before any resizes)
% %H page (canvas) height
% %M Magick filename (original file exactly as given, including read mods)
% %O page (canvas) offset ( = %X%Y )
% %P page (canvas) size ( = %Wx%H )
% %Q image compression quality ( 0 = default )
% %S number of scenes (from image_info; 2147483647 when not set)
% %T image time delay (in centi-seconds)
% %U image resolution units
% %W page (canvas) width
% %X page (canvas) x offset (including sign)
% %Y page (canvas) y offset (including sign)
% %Z unique filename (used for delegates)
% %@ CALCULATED: trim bounding box (without actually trimming)
% %# CALCULATED: 'signature' hash of image values
%
% This routine only handles specifically known properties. It does not
% handle special prefixed properties, profiles, or expressions. Nor does
% it return any free-form property strings.
%
% The returned string is stored in a structure somewhere, and should not be
% directly freed.  If the string was generated (the common case) it is
% stored as either the image artifact or the image_info option named
% 'get-property'.  These may be deleted (cleaned up) when no longer
% required, but neither the artifact nor the option is guaranteed to exist.
%
% The format of the GetMagickProperty method is:
%
% const char *GetMagickProperty(ImageInfo *image_info,Image *image,
% const char *property,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info (optional)
%
% o image: the image (optional)
%
% o key: the key.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetMagickPropertyLetter() resolves a single-letter percent escape (e.g.
  "%w", "%m") against the given image and/or image_info.  Escapes that
  require an image (or an image_info) raise an OptionWarning and return
  NULL when the needed argument is absent.  The result is either a constant
  string owned elsewhere, or a formatted value stored as the image's
  "get-property" artifact (or image_info option) -- the caller must not
  free it.
*/
static const char *GetMagickPropertyLetter(ImageInfo *image_info,
  Image *image,const char letter,ExceptionInfo *exception)
{
#define WarnNoImageReturn(format,arg) \
  if (image == (Image *) NULL ) { \
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, \
      "NoImageForProperty",format,arg); \
    return((const char *) NULL); \
  }
#define WarnNoImageInfoReturn(format,arg) \
  if (image_info == (ImageInfo *) NULL ) { \
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, \
      "NoImageInfoForProperty",format,arg); \
    return((const char *) NULL); \
  }

  char
    value[MagickPathExtent]; /* formatted string to store as an artifact */

  const char
    *string; /* return a string already stored somewhere */

  if ((image != (Image *) NULL) && (image->debug != MagickFalse))
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  else
    if ((image_info != (ImageInfo *) NULL) &&
        (image_info->debug != MagickFalse))
      (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s","no-images");
  *value='\0'; /* formatted string */
  string=(char *) NULL; /* constant string reference */
  /*
    Get properties that are directly defined by images.
  */
  switch (letter)
  {
    case 'b': /* image size read in - in bytes */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatMagickSize(image->extent,MagickFalse,"B",MagickPathExtent,
        value);
      if (image->extent == 0)
        (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",
          MagickPathExtent,value);
      break;
    }
    case 'c': /* image comment property - empty string by default */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      string=GetImageProperty(image,"comment",exception);
      if ( string == (const char *) NULL )
        string="";
      break;
    }
    case 'd': /* Directory component of filename */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      GetPathComponent(image->magick_filename,HeadPath,value);
      if (*value == '\0')
        string="";
      break;
    }
    case 'e': /* Filename extension (suffix) of image file */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      GetPathComponent(image->magick_filename,ExtensionPath,value);
      if (*value == '\0')
        string="";
      break;
    }
    case 'f': /* Filename without directory component */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      GetPathComponent(image->magick_filename,TailPath,value);
      if (*value == '\0')
        string="";
      break;
    }
    case 'g': /* Image geometry, canvas and offset  %Wx%H+%X+%Y */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) image->page.width,(double)
        image->page.height,(double) image->page.x,(double) image->page.y);
      break;
    }
    case 'h': /* Image height (current) */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      /* falls back to magick_rows when rows is zero */
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        (image->rows != 0 ? image->rows : image->magick_rows));
      break;
    }
    case 'i': /* Filename last used for an image (read or write) */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      string=image->filename;
      break;
    }
    case 'k': /* Number of unique colors */
    {
      /*
        FUTURE: ensure this does not generate the formatted comment!
      */
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        GetNumberColors(image,(FILE *) NULL,exception));
      break;
    }
    case 'l': /* Image label property - empty string by default */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      string=GetImageProperty(image,"label",exception);
      if (string == (const char *) NULL)
        string="";
      break;
    }
    case 'm': /* Image format (file magick) */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      string=image->magick;
      break;
    }
    case 'n': /* Number of images in the list.  */
    {
      if ( image != (Image *) NULL )
        (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
          GetImageListLength(image));
      else
        string="0"; /* no images or scenes */
      break;
    }
    case 'o': /* Output Filename - for delegate use only */
      WarnNoImageInfoReturn("\"%%%c\"",letter);
      string=image_info->filename;
      break;
    case 'p': /* Image index in current image list */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        GetImageIndexInList(image));
      break;
    }
    case 'q': /* Quantum depth of image in memory */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        MAGICKCORE_QUANTUM_DEPTH);
      break;
    }
    case 'r': /* Image storage class, colorspace, and alpha enabled.  */
    {
      ColorspaceType
        colorspace;

      WarnNoImageReturn("\"%%%c\"",letter);
      colorspace=image->colorspace;
      if (SetImageGray(image,exception) != MagickFalse)
        colorspace=GRAYColorspace;   /* FUTURE: this is IMv6 not IMv7 */
      (void) FormatLocaleString(value,MagickPathExtent,"%s %s %s",
        CommandOptionToMnemonic(MagickClassOptions,(ssize_t)
        image->storage_class),CommandOptionToMnemonic(MagickColorspaceOptions,
        (ssize_t) colorspace),image->alpha_trait != UndefinedPixelTrait ?
        "Alpha" : "");
      break;
    }
    case 's': /* Image scene number */
    {
#if 0  /* this seems non-sensical -- simplifying */
      if (image_info->number_scenes != 0)
        (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
          image_info->scene);
      else if (image != (Image *) NULL)
        (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
          image->scene);
      else
          string="0";
#else
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        image->scene);
#endif
      break;
    }
    case 't': /* Base filename without directory or extension */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      GetPathComponent(image->magick_filename,BasePath,value);
      if (*value == '\0')
        string="";
      break;
    }
    case 'u': /* Unique filename */
    {
      WarnNoImageInfoReturn("\"%%%c\"",letter);
      string=image_info->unique;
      break;
    }
    case 'w': /* Image width (current) */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      /* falls back to magick_columns when columns is zero */
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        (image->columns != 0 ? image->columns : image->magick_columns));
      break;
    }
    case 'x': /* Image horizontal resolution (with units) */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      /* defaults to 72 when the stored resolution is effectively zero */
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",
        fabs(image->resolution.x) > MagickEpsilon ? image->resolution.x : 72.0);
      break;
    }
    case 'y': /* Image vertical resolution (with units) */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      /* defaults to 72 when the stored resolution is effectively zero */
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",
        fabs(image->resolution.y) > MagickEpsilon ? image->resolution.y : 72.0);
      break;
    }
    case 'z': /* Image depth as read in */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        image->depth);
      break;
    }
    case 'A': /* Image alpha channel  */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      string=CommandOptionToMnemonic(MagickPixelTraitOptions,(ssize_t)
        image->alpha_trait);
      break;
    }
    case 'C': /* Image compression method.  */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      string=CommandOptionToMnemonic(MagickCompressOptions,(ssize_t)
        image->compression);
      break;
    }
    case 'D': /* Image dispose method.  */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      string=CommandOptionToMnemonic(MagickDisposeOptions,(ssize_t)
        image->dispose);
      break;
    }
    case 'G': /* Image size as geometry = "%wx%h" */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%.20gx%.20g",(double)
        image->magick_columns,(double) image->magick_rows);
      break;
    }
    case 'H': /* layer canvas height */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        image->page.height);
      break;
    }
    case 'M': /* Magick filename - filename given incl. coder & read mods */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      string=image->magick_filename;
      break;
    }
    case 'O': /* layer canvas offset with sign = "+%X+%Y" */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%+ld%+ld",(long)
        image->page.x,(long) image->page.y);
      break;
    }
    case 'P': /* layer canvas page size = "%Wx%H" */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%.20gx%.20g",(double)
        image->page.width,(double) image->page.height);
      break;
    }
    case 'Q': /* image compression quality */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      /* 0 means "default"; report it as 92 */
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        (image->quality == 0 ? 92 : image->quality));
      break;
    }
    case 'S': /* Number of scenes in image list.  */
    {
      WarnNoImageInfoReturn("\"%%%c\"",letter);
#if 0 /* What is this number? -- it makes no sense - simplifying */
      if (image_info->number_scenes == 0)
         string="2147483647";
      else if ( image != (Image *) NULL )
        (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
                image_info->scene+image_info->number_scenes);
      else
        string="0";
#else
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        (image_info->number_scenes == 0 ? 2147483647 :
         image_info->number_scenes));
#endif
      break;
    }
    case 'T': /* image time delay for animations */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        image->delay);
      break;
    }
    case 'U': /* Image resolution units. */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      string=CommandOptionToMnemonic(MagickResolutionOptions,(ssize_t)
        image->units);
      break;
    }
    case 'W': /* layer canvas width */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        image->page.width);
      break;
    }
    case 'X': /* layer canvas X offset */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%+.20g",(double)
        image->page.x);
      break;
    }
    case 'Y': /* layer canvas Y offset */
    {
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) FormatLocaleString(value,MagickPathExtent,"%+.20g",(double)
        image->page.y);
      break;
    }
    case '%': /* percent escaped */
    {
      string="%";
      break;
    }
    case '@': /* Trim bounding box, without actually Trimming! */
    {
      RectangleInfo
        page;

      WarnNoImageReturn("\"%%%c\"",letter);
      page=GetImageBoundingBox(image,exception);
      (void) FormatLocaleString(value,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) page.width,(double) page.height,
        (double) page.x,(double)page.y);
      break;
    }
    case '#':
    {
      /*
        Image signature.
      */
      WarnNoImageReturn("\"%%%c\"",letter);
      (void) SignatureImage(image,exception);
      string=GetImageProperty(image,"signature",exception);
      break;
    }
  }
  if (string != (char *) NULL)
    return(string);
  if (*value != '\0')
    {
      /*
        Create a cloned copy of result.
      */
      if (image != (Image *) NULL)
        {
          (void) SetImageArtifact(image,"get-property",value);
          return(GetImageArtifact(image,"get-property"));
        }
      else
        {
          (void) SetImageOption(image_info,"get-property",value);
          return(GetImageOption(image_info,"get-property"));
        }
    }
  return((char *) NULL);
}
/*
  GetMagickProperty() returns the value of a known, calculated image property
  (e.g. "width", "colorspace", "mean") for the given image and/or image_info.
  Single-character requests are delegated to GetMagickPropertyLetter().  The
  result is either a constant string reference, or a clone of a locally
  formatted value stored as the image "get-property" artifact (or image_info
  option when no image is present).  Returns NULL if the property is unknown.
  At least one of image / image_info must be non-NULL.
*/
MagickExport const char *GetMagickProperty(ImageInfo *image_info,
  Image *image,const char *property,ExceptionInfo *exception)
{
  char
    value[MagickPathExtent];

  const char
    *string;

  assert(property[0] != '\0');
  assert(image != (Image *) NULL || image_info != (ImageInfo *) NULL );
  if (property[1] == '\0')  /* single letter property request */
    return(GetMagickPropertyLetter(image_info,image,*property,exception));
  if ((image != (Image *) NULL) && (image->debug != MagickFalse))
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  else
    if ((image_info != (ImageInfo *) NULL) &&
        (image_info->debug != MagickFalse))
      (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s","no-images");
  *value='\0';  /* formatted string */
  string=(char *) NULL;  /* constant string reference */
  switch (*property)
  {
    case 'b':
    {
      if (LocaleCompare("basename",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          GetPathComponent(image->magick_filename,BasePath,value);
          if (*value == '\0')
            string="";
          break;
        }
      if (LocaleCompare("bit-depth",property) == 0)
        {
          /*
            Bug fix: guard against a NULL image before GetImageDepth()
            dereferences it, as every other image-backed property does.
          */
          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
            GetImageDepth(image,exception));
          break;
        }
      break;
    }
    case 'c':
    {
      if (LocaleCompare("channels",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          /* FUTURE: return actual image channels */
          (void) FormatLocaleString(value,MagickPathExtent,"%s",
            CommandOptionToMnemonic(MagickColorspaceOptions,(ssize_t)
            image->colorspace));
          LocaleLower(value);
          if( image->alpha_trait != UndefinedPixelTrait )
            (void) ConcatenateMagickString(value,"a",MagickPathExtent);
          break;
        }
      if (LocaleCompare("colorspace",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          /* FUTURE: return actual colorspace - no 'gray' stuff */
          string=CommandOptionToMnemonic(MagickColorspaceOptions,(ssize_t)
            image->colorspace);
          break;
        }
      if (LocaleCompare("compose",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          string=CommandOptionToMnemonic(MagickComposeOptions,(ssize_t)
            image->compose);
          break;
        }
      if (LocaleCompare("copyright",property) == 0)
        {
          (void) CopyMagickString(value,GetMagickCopyright(),MagickPathExtent);
          break;
        }
      break;
    }
    case 'd':
    {
      if (LocaleCompare("depth",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
            image->depth);
          break;
        }
      if (LocaleCompare("directory",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          GetPathComponent(image->magick_filename,HeadPath,value);
          if (*value == '\0')
            string="";
          break;
        }
      break;
    }
    case 'e':
    {
      if (LocaleCompare("entropy",property) == 0)
        {
          double
            entropy;

          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) GetImageEntropy(image,&entropy,exception);
          (void) FormatLocaleString(value,MagickPathExtent,"%.*g",
            GetMagickPrecision(),entropy);
          break;
        }
      if (LocaleCompare("extension",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          GetPathComponent(image->magick_filename,ExtensionPath,value);
          if (*value == '\0')
            string="";
          break;
        }
      break;
    }
    case 'g':
    {
      if (LocaleCompare("gamma",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) FormatLocaleString(value,MagickPathExtent,"%.*g",
            GetMagickPrecision(),image->gamma);
          break;
        }
      break;
    }
    case 'h':
    {
      if (LocaleCompare("height",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          /* 256 is the documented fallback when no geometry is known */
          (void) FormatLocaleString(value,MagickPathExtent,"%.20g",
            image->magick_rows != 0 ? (double) image->magick_rows : 256.0);
          break;
        }
      break;
    }
    case 'i':
    {
      if (LocaleCompare("input",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          string=image->filename;
          break;
        }
      break;
    }
    case 'k':
    {
      if (LocaleCompare("kurtosis",property) == 0)
        {
          double
            kurtosis,
            skewness;

          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
          (void) FormatLocaleString(value,MagickPathExtent,"%.*g",
            GetMagickPrecision(),kurtosis);
          break;
        }
      break;
    }
    case 'm':
    {
      if (LocaleCompare("magick",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          string=image->magick;
          break;
        }
      if ((LocaleCompare("maxima",property) == 0) ||
          (LocaleCompare("max",property) == 0))
        {
          double
            maximum,
            minimum;

          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) GetImageRange(image,&minimum,&maximum,exception);
          (void) FormatLocaleString(value,MagickPathExtent,"%.*g",
            GetMagickPrecision(),maximum);
          break;
        }
      if (LocaleCompare("mean",property) == 0)
        {
          double
            mean,
            standard_deviation;

          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) GetImageMean(image,&mean,&standard_deviation,exception);
          (void) FormatLocaleString(value,MagickPathExtent,"%.*g",
            GetMagickPrecision(),mean);
          break;
        }
      if ((LocaleCompare("minima",property) == 0) ||
          (LocaleCompare("min",property) == 0))
        {
          double
            maximum,
            minimum;

          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) GetImageRange(image,&minimum,&maximum,exception);
          (void) FormatLocaleString(value,MagickPathExtent,"%.*g",
            GetMagickPrecision(),minimum);
          break;
        }
      break;
    }
    case 'o':
    {
      if (LocaleCompare("opaque",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          string=CommandOptionToMnemonic(MagickBooleanOptions,(ssize_t)
            IsImageOpaque(image,exception));
          break;
        }
      if (LocaleCompare("orientation",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          string=CommandOptionToMnemonic(MagickOrientationOptions,(ssize_t)
            image->orientation);
          break;
        }
      if (LocaleCompare("output",property) == 0)
        {
          WarnNoImageInfoReturn("\"%%[%s]\"",property);
          (void) CopyMagickString(value,image_info->filename,MagickPathExtent);
          break;
        }
      break;
    }
    case 'p':
    {
#if defined(MAGICKCORE_LCMS_DELEGATE)
      if (LocaleCompare("profile:icc",property) == 0 ||
          LocaleCompare("profile:icm",property) == 0)
        {
#if !defined(LCMS_VERSION) || (LCMS_VERSION < 2000)
#define cmsUInt32Number  DWORD
#endif

          const StringInfo
            *profile;

          cmsHPROFILE
            icc_profile;

          profile=GetImageProfile(image,property+8);
          if (profile == (StringInfo *) NULL)
            break;
          icc_profile=cmsOpenProfileFromMem(GetStringInfoDatum(profile),
            (cmsUInt32Number) GetStringInfoLength(profile));
          if (icc_profile != (cmsHPROFILE *) NULL)
            {
#if defined(LCMS_VERSION) && (LCMS_VERSION < 2000)
              string=cmsTakeProductName(icc_profile);
#else
              (void) cmsGetProfileInfoASCII(icc_profile,cmsInfoDescription,
                "en","US",value,MagickPathExtent);
#endif
              (void) cmsCloseProfile(icc_profile);
            }
        }
#endif
      if (LocaleCompare("profiles",property) == 0)
        {
          const char
            *name;

          ResetImageProfileIterator(image);
          name=GetNextImageProfile(image);
          if (name != (char *) NULL)
            {
              (void) CopyMagickString(value,name,MagickPathExtent);
              name=GetNextImageProfile(image);
              while (name != (char *) NULL)
              {
                ConcatenateMagickString(value,",",MagickPathExtent);
                ConcatenateMagickString(value,name,MagickPathExtent);
                name=GetNextImageProfile(image);
              }
            }
          break;
        }
      break;
    }
    case 'r':
    {
      if (LocaleCompare("resolution.x",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) FormatLocaleString(value,MagickPathExtent,"%g",
            image->resolution.x);
          break;
        }
      if (LocaleCompare("resolution.y",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) FormatLocaleString(value,MagickPathExtent,"%g",
            image->resolution.y);
          break;
        }
      break;
    }
    case 's':
    {
      if (LocaleCompare("scene",property) == 0)
        {
          WarnNoImageInfoReturn("\"%%[%s]\"",property);
          if (image_info->number_scenes != 0)
            (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
              image_info->scene);
          else {
            WarnNoImageReturn("\"%%[%s]\"",property);
            (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
              image->scene);
          }
          break;
        }
      if (LocaleCompare("scenes",property) == 0)
        {
          /* FUTURE: equivelent to %n? */
          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
            GetImageListLength(image));
          break;
        }
      if (LocaleCompare("size",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",
            MagickPathExtent,value);
          break;
        }
      if (LocaleCompare("skewness",property) == 0)
        {
          double
            kurtosis,
            skewness;

          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
          (void) FormatLocaleString(value,MagickPathExtent,"%.*g",
            GetMagickPrecision(),skewness);
          break;
        }
      if (LocaleCompare("standard-deviation",property) == 0)
        {
          double
            mean,
            standard_deviation;

          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) GetImageMean(image,&mean,&standard_deviation,exception);
          (void) FormatLocaleString(value,MagickPathExtent,"%.*g",
            GetMagickPrecision(),standard_deviation);
          break;
        }
      break;
    }
    case 't':
    {
      if (LocaleCompare("type",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          string=CommandOptionToMnemonic(MagickTypeOptions,(ssize_t)
            IdentifyImageType(image,exception));
          break;
        }
      break;
    }
    case 'u':
    {
      if (LocaleCompare("unique",property) == 0)
        {
          WarnNoImageInfoReturn("\"%%[%s]\"",property);
          string=image_info->unique;
          break;
        }
      if (LocaleCompare("units",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          string=CommandOptionToMnemonic(MagickResolutionOptions,(ssize_t)
            image->units);
          break;
        }
      break;
    }
    case 'v':
    {
      if (LocaleCompare("version",property) == 0)
        {
          string=GetMagickVersion((size_t *) NULL);
          break;
        }
      break;
    }
    case 'w':
    {
      if (LocaleCompare("width",property) == 0)
        {
          WarnNoImageReturn("\"%%[%s]\"",property);
          (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
            (image->magick_columns != 0 ? image->magick_columns : 256));
          break;
        }
      break;
    }
  }
  if (string != (char *) NULL)
    return(string);
  if (*value != '\0')
    {
      /*
        Create a cloned copy of result.
      */
      if (image != (Image *) NULL)
        {
          (void) SetImageArtifact(image,"get-property",value);
          return(GetImageArtifact(image,"get-property"));
        }
      else
        {
          (void) SetImageOption(image_info,"get-property",value);
          return(GetImageOption(image_info,"get-property"));
        }
    }
  return((char *) NULL);
}
#undef WarnNoImageReturn
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o p e r t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProperty() gets the next free-form string property name.
%
% The format of the GetNextImageProperty method is:
%
% char *GetNextImageProperty(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetNextImageProperty(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image->filename);
if (image->properties == (void *) NULL)
return((const char *) NULL);
return((const char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->properties));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e P r o p e r t i e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageProperties() replaces any embedded formatting characters with
% the appropriate image property and returns the interpreted text.
%
% This searches for and replaces
% \n \r \% replaced by newline, return, and percent resp.
% < > & replaced by '<', '>', '&' resp.
% %% replaced by percent
%
% %x %[x] where 'x' is a single letter properity, case sensitive).
% %[type:name] where 'type' a is special and known prefix.
% %[name] where 'name' is a specifically known attribute, calculated
% value, or a per-image property string name, or a per-image
% 'artifact' (as generated from a global option).
% It may contain ':' as long as the prefix is not special.
%
% Single letter % substitutions will only happen if the character before the
% percent is NOT a number. But braced substitutions will always be performed.
% This prevents the typical usage of percent in a interpreted geometry
% argument from being substituted when the percent is a geometry flag.
%
% If 'glob-expresions' ('*' or '?' characters) is used for 'name' it may be
% used as a search pattern to print multiple lines of "name=value\n" pairs of
% the associacted set of properties.
%
% The returned string must be freed using DestoryString() by the caller.
%
% The format of the InterpretImageProperties method is:
%
% char *InterpretImageProperties(ImageInfo *image_info,
% Image *image,const char *embed_text,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info. (required)
%
% o image: the image. (optional)
%
% o embed_text: the address of a character string containing the embedded
% formatting characters.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport char *InterpretImageProperties(ImageInfo *image_info,Image *image,
  const char *embed_text,ExceptionInfo *exception)
{
#define ExtendInterpretText(string_length) \
DisableMSCWarning(4127) \
{ \
  size_t length=(string_length); \
  if ((size_t) (q-interpret_text+length+1) >= extent) \
    { \
      extent+=length; \
      interpret_text=(char *) ResizeQuantumMemory(interpret_text,extent+ \
        MaxTextExtent,sizeof(*interpret_text)); \
      if (interpret_text == (char *) NULL) \
        return((char *) NULL); \
      q=interpret_text+strlen(interpret_text); \
   } \
} \
RestoreMSCWarning

#define AppendKeyValue2Text(key,value)\
DisableMSCWarning(4127) \
{ \
  size_t length=strlen(key)+strlen(value)+2; \
  if ((size_t) (q-interpret_text+length+1) >= extent) \
    { \
      extent+=length; \
      interpret_text=(char *) ResizeQuantumMemory(interpret_text,extent+ \
        MaxTextExtent,sizeof(*interpret_text)); \
      if (interpret_text == (char *) NULL) \
        return((char *) NULL); \
      q=interpret_text+strlen(interpret_text); \
    } \
  q+=FormatLocaleString(q,extent,"%s=%s\n",(key),(value)); \
} \
RestoreMSCWarning

#define AppendString2Text(string) \
DisableMSCWarning(4127) \
{ \
  size_t length=strlen((string)); \
  if ((size_t) (q-interpret_text+length+1) >= extent) \
    { \
      extent+=length; \
      interpret_text=(char *) ResizeQuantumMemory(interpret_text,extent+ \
        MaxTextExtent,sizeof(*interpret_text)); \
      if (interpret_text == (char *) NULL) \
        return((char *) NULL); \
      q=interpret_text+strlen(interpret_text); \
    } \
  (void) CopyMagickString(q,(string),extent); \
  q+=length; \
} \
RestoreMSCWarning

  char
    *interpret_text;

  MagickBooleanType
    number;

  register char
    *q;  /* current position in interpret_text */

  register const char
    *p;  /* position in embed_text string being expanded */

  size_t
    extent;  /* allocated length of interpret_text */

  assert(image == NULL || image->signature == MagickCoreSignature);
  assert(image_info == NULL || image_info->signature == MagickCoreSignature);
  if ((image != (Image *) NULL) && (image->debug != MagickFalse))
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  else
    if ((image_info != (ImageInfo *) NULL) && (image_info->debug != MagickFalse))
      (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s","no-image");
  if (embed_text == (const char *) NULL)
    return(ConstantString(""));
  p=embed_text;
  while ((isspace((int) ((unsigned char) *p)) != 0) && (*p != '\0'))
    p++;
  if (*p == '\0')
    return(ConstantString(""));
  if ((*p == '@') && (IsPathAccessible(p+1) != MagickFalse))
    {
      /*
        Handle a '@' replace string from file.
      */
      if (IsRightsAuthorized(PathPolicyDomain,ReadPolicyRights,p) == MagickFalse)
        {
          errno=EPERM;
          (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
            "NotAuthorized","`%s'",p);
          return(ConstantString(""));
        }
      interpret_text=FileToString(p+1,~0UL,exception);
      if (interpret_text != (char *) NULL)
        return(interpret_text);
    }
  /*
    Translate any embedded format characters.
  */
  interpret_text=AcquireString(embed_text);  /* new string with extra space */
  extent=MagickPathExtent;  /* allocated space in string */
  number=MagickFalse;  /* is last char a number? */
  /*
    Bug fix: cast through unsigned char before isdigit() -- passing a plain
    (possibly negative) char is undefined behavior per ISO C.
  */
  for (q=interpret_text; *p!='\0';
       number=(isdigit((int) ((unsigned char) *p)) != 0) ? MagickTrue :
       MagickFalse,p++)
  {
    /*
      Look for the various escapes, (and handle other specials)
    */
    *q='\0';
    ExtendInterpretText(MagickPathExtent);
    switch (*p)
    {
      case '\\':
      {
        switch (*(p+1))
        {
          case '\0':
            continue;
          case 'r':  /* convert to RETURN */
          {
            *q++='\r';
            p++;
            continue;
          }
          case 'n':  /* convert to NEWLINE */
          {
            *q++='\n';
            p++;
            continue;
          }
          case '\n':  /* EOL removal UNIX,MacOSX */
          {
            p++;
            continue;
          }
          case '\r':  /* EOL removal DOS,Windows */
          {
            p++;
            if (*p == '\n')  /* return-newline EOL */
              p++;
            continue;
          }
          default:
          {
            p++;
            *q++=(*p);
          }
        }
        continue;
      }
      case '&':
      {
        if (LocaleNCompare("&lt;",p,4) == 0)
          {
            *q++='<';
            p+=3;
          }
        else
          if (LocaleNCompare("&gt;",p,4) == 0)
            {
              *q++='>';
              p+=3;
            }
          else
            if (LocaleNCompare("&amp;",p,5) == 0)
              {
                *q++='&';
                p+=4;
              }
            else
              *q++=(*p);
        continue;
      }
      case '%':
        break;  /* continue to next set of handlers */
      default:
      {
        *q++=(*p);  /* any thing else is 'as normal' */
        continue;
      }
    }
    p++;  /* advance beyond the percent */
    /*
      Doubled Percent - or percent at end of string.
    */
    if ((*p == '\0') || (*p == '\'') || (*p == '"'))
      p--;
    if (*p == '%')
      {
        *q++='%';
        continue;
      }
    /*
      Single letter escapes %c.
    */
    if (*p != '[')
      {
        const char
          *string;

        if (number != MagickFalse)
          {
            /*
              But only if not preceeded by a number!
            */
            *q++='%';  /* do NOT substitute the percent */
            p--;  /* back up one */
            continue;
          }
        string=GetMagickPropertyLetter(image_info,image,*p, exception);
        if (string != (char *) NULL)
          {
            AppendString2Text(string);
            if (image != (Image *) NULL)
              (void) DeleteImageArtifact(image,"get-property");
            if (image_info != (ImageInfo *) NULL)
              (void) DeleteImageOption(image_info,"get-property");
            continue;
          }
        (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
          "UnknownImageProperty","\"%%%c\"",*p);
        continue;
      }
    {
      char
        pattern[2*MagickPathExtent];

      const char
        *key,
        *string;

      register ssize_t
        len;

      ssize_t
        depth;

      /*
        Braced Percent Escape  %[...].
      */
      p++;  /* advance p to just inside the opening brace */
      depth=1;
      if (*p == ']')
        {
          (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
            "UnknownImageProperty","\"%%[]\"");
          break;
        }
      for (len=0; len<(MagickPathExtent-1L) && (*p != '\0');)
      {
        if ((*p == '\\') && (*(p+1) != '\0'))
          {
            /*
              Skip escaped braces within braced pattern.
            */
            pattern[len++]=(*p++);
            pattern[len++]=(*p++);
            continue;
          }
        if (*p == '[')
          depth++;
        if (*p == ']')
          depth--;
        if (depth <= 0)
          break;
        pattern[len++]=(*p++);
      }
      pattern[len]='\0';
      if (depth != 0)
        {
          /*
            Check for unmatched final ']' for "%[...]".
          */
          if (len >= 64)
            {
              pattern[61] = '.';  /* truncate string for error message */
              pattern[62] = '.';
              pattern[63] = '.';
              pattern[64] = '\0';
            }
          (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
            "UnbalancedBraces","\"%%[%s\"",pattern);
          interpret_text=DestroyString(interpret_text);
          return((char *) NULL);
        }
      /*
        Special Lookup Prefixes %[prefix:...].
      */
      if (LocaleNCompare("fx:",pattern,3) == 0)
        {
          double
            value;

          FxInfo
            *fx_info;

          MagickBooleanType
            status;

          /*
            FX - value calculator.
          */
          if (image == (Image *) NULL )
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionWarning,"NoImageForProperty","\"%%[%s]\"",pattern);
              continue;  /* else no image to retrieve artifact */
            }
          fx_info=AcquireFxInfo(image,pattern+3,exception);
          status=FxEvaluateChannelExpression(fx_info,IntensityPixelChannel,0,0,
            &value,exception);
          fx_info=DestroyFxInfo(fx_info);
          if (status != MagickFalse)
            {
              char
                result[MagickPathExtent];

              (void) FormatLocaleString(result,MagickPathExtent,"%.*g",
                GetMagickPrecision(),(double) value);
              AppendString2Text(result);
            }
          continue;
        }
      if (LocaleNCompare("pixel:",pattern,6) == 0)
        {
          FxInfo
            *fx_info;

          double
            value;

          MagickStatusType
            status;

          PixelInfo
            pixel;

          /*
            Pixel - color value calculator.
          */
          if (image == (Image *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionWarning,"NoImageForProperty","\"%%[%s]\"",pattern);
              continue;  /* else no image to retrieve artifact */
            }
          GetPixelInfo(image,&pixel);
          fx_info=AcquireFxInfo(image,pattern+6,exception);
          status=FxEvaluateChannelExpression(fx_info,RedPixelChannel,0,0,
            &value,exception);
          pixel.red=(double) QuantumRange*value;
          status&=FxEvaluateChannelExpression(fx_info,GreenPixelChannel,0,0,
            &value,exception);
          pixel.green=(double) QuantumRange*value;
          status&=FxEvaluateChannelExpression(fx_info,BluePixelChannel,0,0,
            &value,exception);
          pixel.blue=(double) QuantumRange*value;
          if (image->colorspace == CMYKColorspace)
            {
              status&=FxEvaluateChannelExpression(fx_info,BlackPixelChannel,0,0,
                &value,exception);
              pixel.black=(double) QuantumRange*value;
            }
          status&=FxEvaluateChannelExpression(fx_info,AlphaPixelChannel,0,0,
            &value,exception);
          pixel.alpha=(double) QuantumRange*value;
          fx_info=DestroyFxInfo(fx_info);
          if (status != MagickFalse)
            {
              char
                name[MagickPathExtent];

              (void) QueryColorname(image,&pixel,SVGCompliance,name,
                exception);
              AppendString2Text(name);
            }
          continue;
        }
      if (LocaleNCompare("option:",pattern,7) == 0)
        {
          /*
            Option - direct global option lookup (with globbing).
          */
          if (image_info == (ImageInfo *) NULL )
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionWarning,"NoImageForProperty","\"%%[%s]\"",pattern);
              continue;  /* else no image to retrieve artifact */
            }
          if (IsGlob(pattern+7) != MagickFalse)
            {
              ResetImageOptionIterator(image_info);
              while ((key=GetNextImageOption(image_info)) != (const char *) NULL)
                if (GlobExpression(key,pattern+7,MagickTrue) != MagickFalse)
                  {
                    string=GetImageOption(image_info,key);
                    if (string != (const char *) NULL)
                      AppendKeyValue2Text(key,string);
                    /* else - assertion failure? key found but no string value! */
                  }
              continue;
            }
          string=GetImageOption(image_info,pattern+7);
          if (string == (char *) NULL)
            goto PropertyLookupFailure;  /* no artifact of this specifc name */
          AppendString2Text(string);
          continue;
        }
      if (LocaleNCompare("artifact:",pattern,9) == 0)
        {
          /*
            Artifact - direct image artifact lookup (with glob).
          */
          if (image == (Image *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionWarning,"NoImageForProperty","\"%%[%s]\"",pattern);
              continue;  /* else no image to retrieve artifact */
            }
          if (IsGlob(pattern+9) != MagickFalse)
            {
              ResetImageArtifactIterator(image);
              while ((key=GetNextImageArtifact(image)) != (const char *) NULL)
                if (GlobExpression(key,pattern+9,MagickTrue) != MagickFalse)
                  {
                    string=GetImageArtifact(image,key);
                    if (string != (const char *) NULL)
                      AppendKeyValue2Text(key,string);
                    /* else - assertion failure? key found but no string value! */
                  }
              continue;
            }
          string=GetImageArtifact(image,pattern+9);
          if (string == (char *) NULL)
            goto PropertyLookupFailure;  /* no artifact of this specifc name */
          AppendString2Text(string);
          continue;
        }
      if (LocaleNCompare("property:",pattern,9) == 0)
        {
          /*
            Property - direct image property lookup (with glob).
          */
          if (image == (Image *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionWarning,"NoImageForProperty","\"%%[%s]\"",pattern);
              continue;  /* else no image to retrieve artifact */
            }
          if (IsGlob(pattern+9) != MagickFalse)
            {
              ResetImagePropertyIterator(image);
              while ((key=GetNextImageProperty(image)) != (const char *) NULL)
                /*
                  Bug fix: match keys against the glob AFTER the "property:"
                  prefix (pattern+9), as the option:/artifact: branches do;
                  matching against the full pattern never succeeds.
                */
                if (GlobExpression(key,pattern+9,MagickTrue) != MagickFalse)
                  {
                    string=GetImageProperty(image,key,exception);
                    if (string != (const char *) NULL)
                      AppendKeyValue2Text(key,string);
                    /* else - assertion failure? */
                  }
              continue;
            }
          string=GetImageProperty(image,pattern+9,exception);
          if (string == (char *) NULL)
            goto PropertyLookupFailure;  /* no artifact of this specifc name */
          AppendString2Text(string);
          continue;
        }
      if (image != (Image *) NULL)
        {
          /*
            Properties without special prefix.  This handles attributes,
            properties, and profiles such as %[exif:...].  Note the profile
            properties may also include a glob expansion pattern.
          */
          string=GetImageProperty(image,pattern,exception);
          if (string != (const char *) NULL)
            {
              AppendString2Text(string);
              if (image != (Image *) NULL)
                (void)DeleteImageArtifact(image,"get-property");
              if (image_info != (ImageInfo *) NULL)
                (void)DeleteImageOption(image_info,"get-property");
              continue;
            }
        }
      if (IsGlob(pattern) != MagickFalse)
        {
          /*
            Handle property 'glob' patterns such as:
            %[*] %[user:array_??] %[filename:e*]>
          */
          if (image == (Image *) NULL)
            continue;  /* else no image to retrieve proprty - no list */
          ResetImagePropertyIterator(image);
          while ((key=GetNextImageProperty(image)) != (const char *) NULL)
            if (GlobExpression(key,pattern,MagickTrue) != MagickFalse)
              {
                string=GetImageProperty(image,key,exception);
                if (string != (const char *) NULL)
                  AppendKeyValue2Text(key,string);
                /* else - assertion failure? */
              }
          continue;
        }
      /*
        Look for a known property or image attribute such as
        %[basename] %[denisty] %[delay].  Also handles a braced single
        letter: %[b] %[G] %[g].
      */
      string=GetMagickProperty(image_info,image,pattern,exception);
      if (string != (const char *) NULL)
        {
          AppendString2Text(string);
          continue;
        }
      /*
        Look for a per-image artifact.  This includes option lookup
        (FUTURE: interpreted according to image).
      */
      if (image != (Image *) NULL)
        {
          string=GetImageArtifact(image,pattern);
          if (string != (char *) NULL)
            {
              AppendString2Text(string);
              continue;
            }
        }
      else
        if (image_info != (ImageInfo *) NULL)
          {
            /*
              No image, so direct 'option' lookup (no delayed percent escapes).
            */
            string=GetImageOption(image_info,pattern);
            if (string != (char *) NULL)
              {
                AppendString2Text(string);
                continue;
              }
          }
PropertyLookupFailure:
      /*
        Failed to find any match anywhere!
      */
      if (len >= 64)
        {
          pattern[61] = '.';  /* truncate string for error message */
          pattern[62] = '.';
          pattern[63] = '.';
          pattern[64] = '\0';
        }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "UnknownImageProperty","\"%%[%s]\"",pattern);
    }
  }
  *q='\0';
  return(interpret_text);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o p e r t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProperty() removes a property from the image and returns its
% value.
%
% In this case the ConstantString() value returned should be freed by the
% caller when finished.
%
% The format of the RemoveImageProperty method is:
%
% char *RemoveImageProperty(Image *image,const char *property)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o property: the image property.
%
*/
MagickExport char *RemoveImageProperty(Image *image,const char *property)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* no property splay-tree: nothing to remove */
  if (image->properties == (void *) NULL)
    return((char *) NULL);
  /* detach the node and hand its value (and ownership) to the caller */
  return((char *) RemoveNodeFromSplayTree((SplayTreeInfo *) image->properties,
    property));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P r o p e r t y I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePropertyIterator() resets the image properties iterator. Use it
% in conjunction with GetNextImageProperty() to iterate over all the values
% associated with an image property.
%
% The format of the ResetImagePropertyIterator method is:
%
% ResetImagePropertyIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void ResetImagePropertyIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* only rewind the iterator when a property splay-tree exists */
  if (image->properties != (void *) NULL)
    ResetSplayTreeIterator((SplayTreeInfo *) image->properties);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o p e r t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProperty() saves the given string value either to specific known
% attribute or to a freeform property string.
%
% Attempting to set a property that is normally calculated will produce
% an exception.
%
% The format of the SetImageProperty method is:
%
% MagickBooleanType SetImageProperty(Image *image,const char *property,
% const char *value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o property: the image property.
%
% o values: the image property values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageProperty(Image *image,
const char *property,const char *value,ExceptionInfo *exception)
{
MagickBooleanType
status;
MagickStatusType
flags;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->properties == (void *) NULL)
image->properties=NewSplayTree(CompareSplayTreeString,
RelinquishMagickMemory,RelinquishMagickMemory); /* create splay-tree */
if (value == (const char *) NULL)
return(DeleteImageProperty(image,property)); /* delete if NULL */
status=MagickTrue;
if (strlen(property) <= 1)
{
/*
Do not 'set' single letter properties - read only shorthand.
*/
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
/* FUTURE: binary chars or quotes in key should produce a error */
/* Set attributes with known names or special prefixes
return result is found, or break to set a free form properity
*/
switch (*property)
{
#if 0 /* Percent escape's sets values with this prefix: for later use
Throwing an exception causes this setting to fail */
case '8':
{
if (LocaleNCompare("8bim:",property,5) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
break;
}
#endif
case 'B':
case 'b':
{
if (LocaleCompare("background",property) == 0)
{
(void) QueryColorCompliance(value,AllCompliance,
&image->background_color,exception);
/* check for FUTURE: value exception?? */
/* also add user input to splay tree */
}
break; /* not an attribute, add as a property */
}
case 'C':
case 'c':
{
if (LocaleCompare("channels",property) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
if (LocaleCompare("colorspace",property) == 0)
{
ssize_t
colorspace;
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
value);
if (colorspace < 0)
return(MagickFalse); /* FUTURE: value exception?? */
return(SetImageColorspace(image,(ColorspaceType) colorspace,exception));
}
if (LocaleCompare("compose",property) == 0)
{
ssize_t
compose;
compose=ParseCommandOption(MagickComposeOptions,MagickFalse,value);
if (compose < 0)
return(MagickFalse); /* FUTURE: value exception?? */
image->compose=(CompositeOperator) compose;
return(MagickTrue);
}
if (LocaleCompare("compress",property) == 0)
{
ssize_t
compression;
compression=ParseCommandOption(MagickCompressOptions,MagickFalse,
value);
if (compression < 0)
return(MagickFalse); /* FUTURE: value exception?? */
image->compression=(CompressionType) compression;
return(MagickTrue);
}
break; /* not an attribute, add as a property */
}
case 'D':
case 'd':
{
if (LocaleCompare("delay",property) == 0)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(value,&geometry_info);
if ((flags & GreaterValue) != 0)
{
if (image->delay > (size_t) floor(geometry_info.rho+0.5))
image->delay=(size_t) floor(geometry_info.rho+0.5);
}
else
if ((flags & LessValue) != 0)
{
if (image->delay < (size_t) floor(geometry_info.rho+0.5))
image->delay=(ssize_t)
floor(geometry_info.sigma+0.5);
}
else
image->delay=(size_t) floor(geometry_info.rho+0.5);
if ((flags & SigmaValue) != 0)
image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
return(MagickTrue);
}
if (LocaleCompare("delay_units",property) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
if (LocaleCompare("density",property) == 0)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(value,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
return(MagickTrue);
}
if (LocaleCompare("depth",property) == 0)
{
image->depth=StringToUnsignedLong(value);
return(MagickTrue);
}
if (LocaleCompare("dispose",property) == 0)
{
ssize_t
dispose;
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,value);
if (dispose < 0)
return(MagickFalse); /* FUTURE: value exception?? */
image->dispose=(DisposeType) dispose;
return(MagickTrue);
}
break; /* not an attribute, add as a property */
}
#if 0 /* Percent escape's sets values with this prefix: for later use
Throwing an exception causes this setting to fail */
case 'E':
case 'e':
{
if (LocaleNCompare("exif:",property,5) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
break; /* not an attribute, add as a property */
}
case 'F':
case 'f':
{
if (LocaleNCompare("fx:",property,3) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
break; /* not an attribute, add as a property */
}
#endif
case 'G':
case 'g':
{
if (LocaleCompare("gamma",property) == 0)
{
image->gamma=StringToDouble(value,(char **) NULL);
return(MagickTrue);
}
if (LocaleCompare("gravity",property) == 0)
{
ssize_t
gravity;
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,value);
if (gravity < 0)
return(MagickFalse); /* FUTURE: value exception?? */
image->gravity=(GravityType) gravity;
return(MagickTrue);
}
break; /* not an attribute, add as a property */
}
case 'H':
case 'h':
{
if (LocaleCompare("height",property) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
break; /* not an attribute, add as a property */
}
case 'I':
case 'i':
{
if (LocaleCompare("intensity",property) == 0)
{
ssize_t
intensity;
intensity=ParseCommandOption(MagickIntentOptions,MagickFalse,value);
if (intensity < 0)
return(MagickFalse);
image->intensity=(PixelIntensityMethod) intensity;
return(MagickTrue);
}
if (LocaleCompare("intent",property) == 0)
{
ssize_t
rendering_intent;
rendering_intent=ParseCommandOption(MagickIntentOptions,MagickFalse,
value);
if (rendering_intent < 0)
return(MagickFalse); /* FUTURE: value exception?? */
image->rendering_intent=(RenderingIntent) rendering_intent;
return(MagickTrue);
}
if (LocaleCompare("interpolate",property) == 0)
{
ssize_t
interpolate;
interpolate=ParseCommandOption(MagickInterpolateOptions,MagickFalse,
value);
if (interpolate < 0)
return(MagickFalse); /* FUTURE: value exception?? */
image->interpolate=(PixelInterpolateMethod) interpolate;
return(MagickTrue);
}
#if 0 /* Percent escape's sets values with this prefix: for later use
Throwing an exception causes this setting to fail */
if (LocaleNCompare("iptc:",property,5) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
#endif
break; /* not an attribute, add as a property */
}
case 'K':
case 'k':
if (LocaleCompare("kurtosis",property) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
break; /* not an attribute, add as a property */
case 'L':
case 'l':
{
if (LocaleCompare("loop",property) == 0)
{
image->iterations=StringToUnsignedLong(value);
return(MagickTrue);
}
break; /* not an attribute, add as a property */
}
case 'M':
case 'm':
if ((LocaleCompare("magick",property) == 0) ||
(LocaleCompare("max",property) == 0) ||
(LocaleCompare("mean",property) == 0) ||
(LocaleCompare("min",property) == 0) ||
(LocaleCompare("min",property) == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
break; /* not an attribute, add as a property */
case 'O':
case 'o':
if (LocaleCompare("opaque",property) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
break; /* not an attribute, add as a property */
case 'P':
case 'p':
{
if (LocaleCompare("page",property) == 0)
{
char
*geometry;
geometry=GetPageGeometry(value);
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
return(MagickTrue);
}
#if 0 /* Percent escape's sets values with this prefix: for later use
Throwing an exception causes this setting to fail */
if (LocaleNCompare("pixel:",property,6) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
#endif
if (LocaleCompare("profile",property) == 0)
{
ImageInfo
*image_info;
StringInfo
*profile;
image_info=AcquireImageInfo();
(void) CopyMagickString(image_info->filename,value,MagickPathExtent);
(void) SetImageInfo(image_info,1,exception);
profile=FileToStringInfo(image_info->filename,~0UL,exception);
if (profile != (StringInfo *) NULL)
status=SetImageProfile(image,image_info->magick,profile,exception);
image_info=DestroyImageInfo(image_info);
return(MagickTrue);
}
break; /* not an attribute, add as a property */
}
case 'R':
case 'r':
{
if (LocaleCompare("rendering-intent",property) == 0)
{
ssize_t
rendering_intent;
rendering_intent=ParseCommandOption(MagickIntentOptions,MagickFalse,
value);
if (rendering_intent < 0)
return(MagickFalse); /* FUTURE: value exception?? */
image->rendering_intent=(RenderingIntent) rendering_intent;
return(MagickTrue);
}
break; /* not an attribute, add as a property */
}
case 'S':
case 's':
if ((LocaleCompare("size",property) == 0) ||
(LocaleCompare("skewness",property) == 0) ||
(LocaleCompare("scenes",property) == 0) ||
(LocaleCompare("standard-deviation",property) == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
break; /* not an attribute, add as a property */
case 'T':
case 't':
{
if (LocaleCompare("tile-offset",property) == 0)
{
char
*geometry;
geometry=GetPageGeometry(value);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
return(MagickTrue);
}
break; /* not an attribute, add as a property */
}
case 'U':
case 'u':
{
if (LocaleCompare("units",property) == 0)
{
ssize_t
units;
units=ParseCommandOption(MagickResolutionOptions,MagickFalse,value);
if (units < 0)
return(MagickFalse); /* FUTURE: value exception?? */
image->units=(ResolutionType) units;
return(MagickTrue);
}
break; /* not an attribute, add as a property */
}
case 'V':
case 'v':
{
if (LocaleCompare("version",property) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
break; /* not an attribute, add as a property */
}
case 'W':
case 'w':
{
if (LocaleCompare("width",property) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
break; /* not an attribute, add as a property */
}
#if 0 /* Percent escape's sets values with this prefix: for later use
Throwing an exception causes this setting to fail */
case 'X':
case 'x':
{
if (LocaleNCompare("xmp:",property,4) == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"SetReadOnlyProperty","`%s'",property);
return(MagickFalse);
}
break; /* not an attribute, add as a property */
}
#endif
}
/* Default: not an attribute, add as a property */
status=AddValueToSplayTree((SplayTreeInfo *) image->properties,
ConstantString(property),ConstantString(value));
/* FUTURE: error if status is bad? */
return(status);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_5485_0 |
crossvul-cpp_data_bad_2561_1 | /*
nicklist.c : irssi
Copyright (C) 1999-2000 Timo Sirainen
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "module.h"
#include "signals.h"
#include "misc.h"
#include "servers.h"
#include "channels.h"
#include "nicklist.h"
#include "masks.h"
#define isalnumhigh(a) \
(i_isalnum(a) || (unsigned char) (a) >= 128)
/*
 * Insert `nick' into the channel's nick hash table. The table maps a
 * nick string to the head of a singly linked chain of NICK_RECs that
 * share the same name; duplicates are appended to the end of the chain.
 */
static void nick_hash_add(CHANNEL_REC *channel, NICK_REC *nick)
{
    NICK_REC *head;

    nick->next = NULL;

    head = g_hash_table_lookup(channel->nicks, nick->nick);
    if (head == NULL) {
        g_hash_table_insert(channel->nicks, nick->nick, nick);
    } else {
        /* duplicate nick name: append to end of the collision chain */
        NICK_REC *tail = head;

        while (tail->next != NULL)
            tail = tail->next;
        tail->next = nick;
    }

    /* our own nick is kept at the head of its chain */
    if (nick == channel->ownnick)
        nicklist_set_own(channel, nick);
}
/*
 * Unlink `nick' from its name's collision chain in the channel's nick
 * hash table (the inverse of nick_hash_add()).
 */
static void nick_hash_remove(CHANNEL_REC *channel, NICK_REC *nick)
{
    NICK_REC *list;

    list = g_hash_table_lookup(channel->nicks, nick->nick);
    if (list == NULL)
        return;

    if (list == nick || list->next == NULL) {
        /* nick is the chain head (a one-element chain must be nick
           itself, assuming nick was inserted via nick_hash_add) */
        g_hash_table_remove(channel->nicks, nick->nick);
        if (list->next != NULL) {
            /* re-insert the remainder of the chain under its new head */
            g_hash_table_insert(channel->nicks, nick->next->nick,
                                nick->next);
        }
    } else {
        /* NOTE(review): loops forever if nick is not actually in the
           chain -- relies on the add/remove pairing invariant */
        while (list->next != nick)
            list = list->next;
        list->next = nick->next;
    }
}
/* Add new nick to list. Takes ownership of `nick' and emits the
   "nicklist new" signal after it has been inserted. */
void nicklist_insert(CHANNEL_REC *channel, NICK_REC *nick)
{
    /*MODULE_DATA_INIT(nick);*/

    nick->type = module_get_uniq_id("NICK", 0);
    nick->chat_type = channel->chat_type;

    nick_hash_add(channel, nick);
    signal_emit("nicklist new", 2, channel, nick);
}
/* Set host address for nick. Replaces any previous host string and
   emits "nicklist host changed". */
void nicklist_set_host(CHANNEL_REC *channel, NICK_REC *nick, const char *host)
{
    g_return_if_fail(channel != NULL);
    g_return_if_fail(nick != NULL);
    g_return_if_fail(host != NULL);

    g_free_not_null(nick->host);
    nick->host = g_strdup(host);

    signal_emit("nicklist host changed", 2, channel, nick);
}
/*
 * Free a NICK_REC. Emits "nicklist remove" first so listeners can
 * still inspect the record, and clears channel->ownnick if it pointed
 * here. Does NOT unlink the record from the hash table -- callers
 * must do that themselves (see nicklist_remove()).
 */
static void nicklist_destroy(CHANNEL_REC *channel, NICK_REC *nick)
{
    signal_emit("nicklist remove", 2, channel, nick);

    if (channel->ownnick == nick)
        channel->ownnick = NULL;

    /*MODULE_DATA_DEINIT(nick);*/
    g_free(nick->nick);
    g_free_not_null(nick->realname);
    g_free_not_null(nick->host);
    g_free(nick);
}
/* Remove nick from list: unlink from the hash table, then free it. */
void nicklist_remove(CHANNEL_REC *channel, NICK_REC *nick)
{
    g_return_if_fail(IS_CHANNEL(channel));
    g_return_if_fail(nick != NULL);

    nick_hash_remove(channel, nick);
    nicklist_destroy(channel, nick);
}
/*
 * Rename every (channel, nick) pair in `nicks'. The list is laid out
 * as alternating CHANNEL_REC* / NICK_REC* elements (as produced by
 * nicklist_get_same() / nicklist_get_same_unique()), hence the
 * tmp->next->next stride. Frees `nicks' when done.
 */
static void nicklist_rename_list(SERVER_REC *server, void *new_nick_id,
                 const char *old_nick, const char *new_nick,
                 GSList *nicks)
{
    CHANNEL_REC *channel;
    NICK_REC *nickrec;
    GSList *tmp;

    for (tmp = nicks; tmp != NULL; tmp = tmp->next->next) {
        channel = tmp->data;
        nickrec = tmp->next->data;

        /* remove old nick from hash table */
        nick_hash_remove(channel, nickrec);

        if (new_nick_id != NULL)
            nickrec->unique_id = new_nick_id;

        g_free(nickrec->nick);
        nickrec->nick = g_strdup(new_nick);

        /* add new nick to hash table */
        nick_hash_add(channel, nickrec);

        signal_emit("nicklist changed", 3, channel, nickrec, old_nick);
    }
    g_slist_free(nicks);
}
/* Rename a nick on every channel of `server' where it appears. */
void nicklist_rename(SERVER_REC *server, const char *old_nick,
             const char *new_nick)
{
    nicklist_rename_list(server, NULL, old_nick, new_nick,
                 nicklist_get_same(server, old_nick));
}

/* Like nicklist_rename(), but matches by unique id instead of by name,
   and updates the stored id as well. */
void nicklist_rename_unique(SERVER_REC *server,
                void *old_nick_id, const char *old_nick,
                void *new_nick_id, const char *new_nick)
{
    nicklist_rename_list(server, new_nick_id, old_nick, new_nick,
                 nicklist_get_same_unique(server, old_nick_id));
}
/*
 * Find the first nick on the channel whose nick!host matches the
 * wildcard `mask'. Scans every collision chain; returns NULL when
 * nothing matches.
 */
static NICK_REC *nicklist_find_wildcards(CHANNEL_REC *channel,
                     const char *mask)
{
    NICK_REC *nick;
    GHashTableIter iter;

    g_hash_table_iter_init(&iter, channel->nicks);
    while (g_hash_table_iter_next(&iter, NULL, (void*)&nick)) {
        /* walk the chain of same-named nicks under this key */
        for (; nick != NULL; nick = nick->next) {
            if (mask_match_address(channel->server, mask,
                           nick->nick, nick->host))
                return nick;
        }
    }
    return NULL;
}
/*
 * Collect every nick on the channel whose nick!host matches the
 * wildcard `mask'. Returns a newly allocated GSList of NICK_REC*
 * (caller frees the list, not the records), or NULL if none match.
 */
GSList *nicklist_find_multiple(CHANNEL_REC *channel, const char *mask)
{
    GSList *nicks;
    NICK_REC *nick;
    GHashTableIter iter;

    g_return_val_if_fail(IS_CHANNEL(channel), NULL);
    g_return_val_if_fail(mask != NULL, NULL);

    nicks = NULL;
    g_hash_table_iter_init(&iter, channel->nicks);
    while (g_hash_table_iter_next(&iter, NULL, (void*)&nick)) {
        /* walk the chain of same-named nicks under this key */
        for (; nick != NULL; nick = nick->next) {
            if (mask_match_address(channel->server, mask,
                           nick->nick, nick->host))
                nicks = g_slist_prepend(nicks, nick);
        }
    }

    return nicks;
}
/* Find nick by exact name. Returns the head of the collision chain,
   or NULL if the nick is not on the channel. */
NICK_REC *nicklist_find(CHANNEL_REC *channel, const char *nick)
{
    g_return_val_if_fail(IS_CHANNEL(channel), NULL);
    g_return_val_if_fail(nick != NULL, NULL);

    return g_hash_table_lookup(channel->nicks, nick);
}
/* Find the nick with the given name AND unique id; walks the collision
   chain of same-named nicks. Returns NULL if no entry has `id'. */
NICK_REC *nicklist_find_unique(CHANNEL_REC *channel, const char *nick,
                   void *id)
{
    NICK_REC *rec;

    g_return_val_if_fail(IS_CHANNEL(channel), NULL);
    g_return_val_if_fail(nick != NULL, NULL);

    rec = g_hash_table_lookup(channel->nicks, nick);
    while (rec != NULL && rec->unique_id != id)
        rec = rec->next;

    return rec;
}
/* Find nick mask, wildcards allowed. `mask' is "nick" or "nick!host";
   a '*' or '?' in the nick part switches to a full wildcard scan. */
NICK_REC *nicklist_find_mask(CHANNEL_REC *channel, const char *mask)
{
    NICK_REC *nickrec;
    char *nick, *host;

    g_return_val_if_fail(IS_CHANNEL(channel), NULL);
    g_return_val_if_fail(mask != NULL, NULL);

    /* split a private copy of the mask at '!' into nick + host */
    nick = g_strdup(mask);
    host = strchr(nick, '!');
    if (host != NULL) *host++ = '\0';

    if (strchr(nick, '*') || strchr(nick, '?')) {
        /* wildcards in the nick part: do a full table scan with the
           original, unsplit mask */
        g_free(nick);
        return nicklist_find_wildcards(channel, mask);
    }

    nickrec = g_hash_table_lookup(channel->nicks, nick);

    if (host != NULL) {
        /* find the chain entry whose host matches the host pattern */
        while (nickrec != NULL) {
            if (nickrec->host != NULL &&
                match_wildcards(host, nickrec->host))
                break; /* match */
            nickrec = nickrec->next;
        }
    }
    g_free(nick);
    return nickrec;
}
/* g_hash_table_foreach() callback: prepend every record of one
   collision chain onto *list. */
static void get_nicks_hash(gpointer key, NICK_REC *rec, GSList **list)
{
    NICK_REC *r;

    for (r = rec; r != NULL; r = r->next)
        *list = g_slist_prepend(*list, r);
}
/* Get list of nicks. Returns a newly allocated GSList of NICK_REC*
   in no particular order; the caller frees the list only. */
GSList *nicklist_getnicks(CHANNEL_REC *channel)
{
    GSList *list;

    g_return_val_if_fail(IS_CHANNEL(channel), NULL);

    list = NULL;
    g_hash_table_foreach(channel->nicks, (GHFunc) get_nicks_hash, &list);
    return list;
}
/*
 * Find `nick' on every channel of `server'. Returns a GSList of
 * alternating CHANNEL_REC* / NICK_REC* pairs (the layout expected by
 * nicklist_rename_list() and nicklist_update_flags_list()).
 */
GSList *nicklist_get_same(SERVER_REC *server, const char *nick)
{
    GSList *tmp;
    GSList *list = NULL;

    g_return_val_if_fail(IS_SERVER(server), NULL);

    for (tmp = server->channels; tmp != NULL; tmp = tmp->next) {
        NICK_REC *nick_rec;
        CHANNEL_REC *channel = tmp->data;

        /* append one (channel, nick) pair per chain entry */
        for (nick_rec = g_hash_table_lookup(channel->nicks, nick);
             nick_rec != NULL;
             nick_rec = nick_rec->next) {
            list = g_slist_append(list, channel);
            list = g_slist_append(list, nick_rec);
        }
    }

    return list;
}
/* Accumulator for nicklist_get_same_unique(). */
typedef struct {
    CHANNEL_REC *channel;  /* channel currently being scanned */
    void *id;              /* unique id being searched for */
    GSList *list;          /* alternating channel/nick result pairs */
} NICKLIST_GET_SAME_UNIQUE_REC;

/* g_hash_table_foreach() callback: append the first chain entry whose
   unique_id matches rec->id as a (channel, nick) pair. */
static void get_nicks_same_hash_unique(gpointer key, NICK_REC *nick,
                       NICKLIST_GET_SAME_UNIQUE_REC *rec)
{
    while (nick != NULL) {
        if (nick->unique_id == rec->id) {
            rec->list = g_slist_append(rec->list, rec->channel);
            rec->list = g_slist_append(rec->list, nick);
            break;
        }

        nick = nick->next;
    }
}
/*
 * Find the nick with unique id `id' on every channel of `server'.
 * Returns a GSList of alternating CHANNEL_REC* / NICK_REC* pairs,
 * same layout as nicklist_get_same().
 */
GSList *nicklist_get_same_unique(SERVER_REC *server, void *id)
{
    NICKLIST_GET_SAME_UNIQUE_REC rec;
    GSList *tmp;

    g_return_val_if_fail(IS_SERVER(server), NULL);
    g_return_val_if_fail(id != NULL, NULL);

    rec.id = id;
    rec.list = NULL;
    for (tmp = server->channels; tmp != NULL; tmp = tmp->next) {
        rec.channel = tmp->data;
        g_hash_table_foreach(rec.channel->nicks,
                     (GHFunc) get_nicks_same_hash_unique,
                     &rec);
    }

    return rec.list;
}
/* nick record comparison for sort functions. Orders first by the rank
   of the first prefix char (earlier in `nick_prefix' sorts first,
   prefixless nicks last), then case-insensitively by name. NULL sorts
   before non-NULL. */
int nicklist_compare(NICK_REC *p1, NICK_REC *p2, const char *nick_prefix)
{
    int i;

    if (p1 == NULL) return -1;
    if (p2 == NULL) return 1;

    if (p1->prefixes[0] == p2->prefixes[0])
        return g_ascii_strcasecmp(p1->nick, p2->nick);

    if (!p1->prefixes[0])
        return 1;
    if (!p2->prefixes[0])
        return -1;

    /* They aren't equal. We've taken care of that already.
     * The first one we encounter in this list is the greater.
     */
    for (i = 0; nick_prefix[i] != '\0'; i++) {
        if (p1->prefixes[0] == nick_prefix[i])
            return -1;
        if (p2->prefixes[0] == nick_prefix[i])
            return 1;
    }

    /* we should never have gotten here... */
    return g_ascii_strcasecmp(p1->nick, p2->nick);
}
/*
 * Update gone/serverop flags on every (channel, nick) pair in `nicks'
 * (alternating-pair list, see nicklist_get_same()). A value of -1
 * means "leave that flag unchanged". Emits a change signal only when
 * a flag actually flips. Frees `nicks'.
 */
static void nicklist_update_flags_list(SERVER_REC *server, int gone,
                       int serverop, GSList *nicks)
{
    GSList *tmp;
    CHANNEL_REC *channel;
    NICK_REC *rec;

    g_return_if_fail(IS_SERVER(server));

    for (tmp = nicks; tmp != NULL; tmp = tmp->next->next) {
        channel = tmp->data;
        rec = tmp->next->data;

        rec->last_check = time(NULL);

        if (gone != -1 && (int)rec->gone != gone) {
            rec->gone = gone;
            signal_emit("nicklist gone changed", 2, channel, rec);
        }

        if (serverop != -1 && (int)rec->serverop != serverop) {
            rec->serverop = serverop;
            signal_emit("nicklist serverop changed", 2, channel, rec);
        }
    }
    g_slist_free(nicks);
}
/* Update gone/serverop flags for a nick, matched by name, on every
   channel of the server (-1 = leave flag unchanged). */
void nicklist_update_flags(SERVER_REC *server, const char *nick,
               int gone, int serverop)
{
    nicklist_update_flags_list(server, gone, serverop,
                   nicklist_get_same(server, nick));
}

/* Same as nicklist_update_flags(), but matched by unique id. */
void nicklist_update_flags_unique(SERVER_REC *server, void *id,
                  int gone, int serverop)
{
    nicklist_update_flags_list(server, gone, serverop,
                   nicklist_get_same_unique(server, id));
}
/* Specify which nick in channel is ours */
void nicklist_set_own(CHANNEL_REC *channel, NICK_REC *nick)
{
    NICK_REC *first, *next;

    channel->ownnick = nick;

    /* move our nick in the list to first, makes some things easier
       (like handling multiple identical nicks in fe-messages.c) */
    /* NOTE(review): assumes `nick' is already in the hash table --
       first would be NULL otherwise; confirm against callers */
    first = g_hash_table_lookup(channel->nicks, nick->nick);
    if (first->next == NULL)
        return;

    /* splice `nick' out of its position and relink it as chain head */
    next = nick->next;
    nick->next = first;

    while (first->next != nick)
        first = first->next;
    first->next = next;

    g_hash_table_insert(channel->nicks, nick->nick, nick);
}
/* "channel created" signal handler: allocate the (case-insensitive)
   nick hash table for the new channel. */
static void sig_channel_created(CHANNEL_REC *channel)
{
    g_return_if_fail(IS_CHANNEL(channel));

    channel->nicks = g_hash_table_new((GHashFunc) g_istr_hash,
                      (GCompareFunc) g_istr_equal);
}
/* g_hash_table_foreach() callback: free every record in one collision
   chain. The table itself is destroyed by the caller afterwards. */
static void nicklist_remove_hash(gpointer key, NICK_REC *nick,
                 CHANNEL_REC *channel)
{
    NICK_REC *next;

    for (; nick != NULL; nick = next) {
        /* save the link before the record is freed */
        next = nick->next;
        nicklist_destroy(channel, nick);
    }
}
/* "channel destroyed" signal handler: free every nick record, then
   the hash table itself. */
static void sig_channel_destroyed(CHANNEL_REC *channel)
{
    g_return_if_fail(IS_CHANNEL(channel));

    g_hash_table_foreach(channel->nicks,
                 (GHFunc) nicklist_remove_hash, channel);
    g_hash_table_destroy(channel->nicks);
}
/*
 * Look up the first `len' characters of `nick' on the channel.
 * When several records share the name, prefers the one whose stored
 * nick matches with identical case; otherwise the last chain entry
 * is returned. Returns NULL if not found.
 */
static NICK_REC *nick_nfind(CHANNEL_REC *channel, const char *nick, int len)
{
    NICK_REC *rec;
    char *tmpnick;

    tmpnick = g_strndup(nick, len);
    rec = g_hash_table_lookup(channel->nicks, tmpnick);

    if (rec != NULL) {
        /* if there's multiple, get the one with identical case */
        while (rec->next != NULL) {
            if (g_strcmp0(rec->nick, tmpnick) == 0)
                break;
            rec = rec->next;
        }
    }

    g_free(tmpnick);
    return rec;
}
/*
 * Check if `msg' is meant for `nick'. Returns TRUE on an exact
 * (case-insensitive) prefix match, or on a fuzzy match where only
 * non-alphanumeric characters of the nick differ -- unless some other
 * nick on the channel matches exactly. Also handles comma-separated
 * address lists ("nick1,nick2: text").
 */
int nick_match_msg(CHANNEL_REC *channel, const char *msg, const char *nick)
{
    const char *msgstart, *orignick;
    int len, fullmatch;

    g_return_val_if_fail(nick != NULL, FALSE);
    g_return_val_if_fail(msg != NULL, FALSE);

    /* Let the chat protocol supply its own matcher if it has one.
       channel->server may legitimately be NULL (e.g. the server record
       is already gone); the old code dereferenced it unconditionally,
       causing a NULL pointer crash (CWE-476). */
    if (channel != NULL && channel->server != NULL &&
        channel->server->nick_match_msg != NULL)
        return channel->server->nick_match_msg(msg, nick);

    /* first check for identical match */
    len = strlen(nick);
    if (g_ascii_strncasecmp(msg, nick, len) == 0 &&
        !isalnumhigh((int) msg[len]))
        return TRUE;

    orignick = nick;
    for (;;) {
        nick = orignick;
        msgstart = msg;
        fullmatch = TRUE;

        /* check if it matches for alphanumeric parts of nick */
        while (*nick != '\0' && *msg != '\0') {
            if (i_toupper(*nick) == i_toupper(*msg)) {
                /* total match */
                msg++;
            } else if (i_isalnum(*msg) && !i_isalnum(*nick)) {
                /* some strange char in your nick, pass it */
                fullmatch = FALSE;
            } else
                break;
            nick++;
        }

        if (msg != msgstart && !isalnumhigh(*msg)) {
            /* at least some of the chars in line matched the
               nick, and msg continue with non-alphanum character,
               this might be for us.. */
            if (*nick != '\0') {
                /* remove the rest of the non-alphanum chars
                   from nick and check if it then matches. */
                fullmatch = FALSE;
                while (*nick != '\0' && !i_isalnum(*nick))
                    nick++;
            }

            if (*nick == '\0') {
                /* yes, match! */
                break;
            }
        }

        /* no match. check if this is a message to multiple people
           (like nick1,nick2: text) */
        while (*msg != '\0' && *msg != ' ' && *msg != ',') msg++;

        if (*msg != ',') {
            nick = orignick;
            break;
        }

        msg++;
    }

    if (*nick != '\0')
        return FALSE; /* didn't match */

    if (fullmatch)
        return TRUE; /* matched without fuzzyness */

    if (channel != NULL) {
        /* matched with some fuzzyness .. check if there's an exact match
           for some other nick in the same channel. */
        return nick_nfind(channel, msgstart, (int) (msg-msgstart)) == NULL;
    } else {
        return TRUE;
    }
}
/* Return TRUE if `nick' appears anywhere in `msg' (case-insensitive
   substring search); the channel argument is unused here. */
int nick_match_msg_everywhere(CHANNEL_REC *channel, const char *msg, const char *nick)
{
    g_return_val_if_fail(nick != NULL, FALSE);
    g_return_val_if_fail(msg != NULL, FALSE);

    return stristr_full(msg, nick) != NULL;
}
/* Hook nicklist management into channel lifecycle signals. */
void nicklist_init(void)
{
    signal_add_first("channel created", (SIGNAL_FUNC) sig_channel_created);
    signal_add("channel destroyed", (SIGNAL_FUNC) sig_channel_destroyed);
}

/* Undo nicklist_init() and release the "NICK" module id. */
void nicklist_deinit(void)
{
    signal_remove("channel created", (SIGNAL_FUNC) sig_channel_created);
    signal_remove("channel destroyed", (SIGNAL_FUNC) sig_channel_destroyed);

    module_uniq_destroy("NICK");
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_2561_1 |
crossvul-cpp_data_good_3060_18 | /* user_defined.c: user defined key type
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <keys/user-type.h>
#include <asm/uaccess.h>
#include "internal.h"
static int logon_vet_description(const char *desc);
/*
* user defined keys take an arbitrary string as the description and an
* arbitrary blob of data as the payload
*/
/* Key type "user": arbitrary description, arbitrary blob payload,
   readable from userspace via the .read op. */
struct key_type key_type_user = {
    .name           = "user",
    .preparse       = user_preparse,
    .free_preparse  = user_free_preparse,
    .instantiate    = generic_key_instantiate,
    .update         = user_update,
    .revoke         = user_revoke,
    .destroy        = user_destroy,
    .describe       = user_describe,
    .read           = user_read,
};
EXPORT_SYMBOL_GPL(key_type_user);
/*
* This key type is essentially the same as key_type_user, but it does
* not define a .read op. This is suitable for storing username and
* password pairs in the keyring that you do not want to be readable
* from userspace.
*/
/* Key type "logon": same ops as "user" but with no .read op (payload
   not readable from userspace) and a vetted "prefix:name" description. */
struct key_type key_type_logon = {
    .name            = "logon",
    .preparse        = user_preparse,
    .free_preparse   = user_free_preparse,
    .instantiate     = generic_key_instantiate,
    .update          = user_update,
    .revoke          = user_revoke,
    .destroy         = user_destroy,
    .describe        = user_describe,
    .vet_description = logon_vet_description,
};
EXPORT_SYMBOL_GPL(key_type_logon);
/*
 * Preparse a user defined key payload: validate the length, then copy
 * the data into a single header+data allocation that instantiation
 * will attach to the key.
 */
int user_preparse(struct key_preparsed_payload *prep)
{
    struct user_key_payload *upayload;
    size_t datalen = prep->datalen;

    /* reject empty, oversized (> 32767 bytes) or missing payloads */
    if (datalen <= 0 || datalen > 32767 || !prep->data)
        return -EINVAL;

    upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
    if (!upayload)
        return -ENOMEM;

    /* attach the data */
    prep->quotalen = datalen;
    prep->payload[0] = upayload;
    upayload->datalen = datalen;
    memcpy(upayload->data, prep->data, datalen);
    return 0;
}
EXPORT_SYMBOL_GPL(user_preparse);
/*
 * Free a preparse of a user defined key payload (the buffer set up by
 * user_preparse(), if instantiation never consumed it).
 */
void user_free_preparse(struct key_preparsed_payload *prep)
{
    kfree(prep->payload[0]);
}
EXPORT_SYMBOL_GPL(user_free_preparse);
/*
 * update a user defined key
 * - the key's semaphore is write-locked
 * Builds a replacement payload first, then swaps it in only if the
 * quota reservation succeeds; the displaced (or unused) buffer is
 * freed via RCU so concurrent readers stay safe.
 */
int user_update(struct key *key, struct key_preparsed_payload *prep)
{
    struct user_key_payload *upayload, *zap;
    size_t datalen = prep->datalen;
    int ret;

    ret = -EINVAL;
    if (datalen <= 0 || datalen > 32767 || !prep->data)
        goto error;

    /* construct a replacement payload */
    ret = -ENOMEM;
    upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
    if (!upayload)
        goto error;

    upayload->datalen = datalen;
    memcpy(upayload->data, prep->data, datalen);

    /* check the quota and attach the new data */
    /* if reservation fails, zap still points at the new buffer so the
       kfree_rcu below discards it instead of the live payload */
    zap = upayload;

    ret = key_payload_reserve(key, datalen);

    if (ret == 0) {
        /* attach the new data, displacing the old */
        zap = key->payload.data;
        rcu_assign_keypointer(key, upayload);
        key->expiry = 0;
    }

    if (zap)
        kfree_rcu(zap, rcu);

error:
    return ret;
}
EXPORT_SYMBOL_GPL(user_update);
/*
 * dispose of the links from a revoked keyring
 * - called with the key sem write-locked
 * Clears the quota and detaches the payload, freeing it via RCU.
 */
void user_revoke(struct key *key)
{
    struct user_key_payload *upayload = key->payload.data;

    /* clear the quota */
    key_payload_reserve(key, 0);

    if (upayload) {
        rcu_assign_keypointer(key, NULL);
        kfree_rcu(upayload, rcu);
    }
}

EXPORT_SYMBOL(user_revoke);
/*
 * dispose of the data dangling from the corpse of a user key
 * (final teardown -- no concurrent readers remain, plain kfree is ok)
 */
void user_destroy(struct key *key)
{
    struct user_key_payload *upayload = key->payload.data;

    kfree(upayload);
}
EXPORT_SYMBOL_GPL(user_destroy);
/*
 * describe the user key: the description string, plus the payload
 * length once the key has been instantiated.
 */
void user_describe(const struct key *key, struct seq_file *m)
{
    seq_puts(m, key->description);
    if (key_is_instantiated(key))
        seq_printf(m, ": %u", key->datalen);
}
EXPORT_SYMBOL_GPL(user_describe);
/*
 * read the key data
 * - the key's semaphore is read-locked
 * Returns the full payload length even when the copy was truncated to
 * the caller's buffer size; copies at most `buflen' bytes.
 */
long user_read(const struct key *key, char __user *buffer, size_t buflen)
{
    struct user_key_payload *upayload;
    long ret;

    upayload = rcu_dereference_key(key);
    ret = upayload->datalen;

    /* we can return the data as is */
    if (buffer && buflen > 0) {
        if (buflen > upayload->datalen)
            buflen = upayload->datalen;

        if (copy_to_user(buffer, upayload->data, buflen) != 0)
            ret = -EFAULT;
    }

    return ret;
}
EXPORT_SYMBOL_GPL(user_read);
/* Vet the description for a "logon" key: it must have the qualified
   form "prefix:name", i.e. contain a ':' that is not the first char. */
static int logon_vet_description(const char *desc)
{
    const char *colon = strchr(desc, ':');

    /* no ':' at all, or ':' as the very first character -> reject */
    if (colon == NULL || colon == desc)
        return -EINVAL;

    return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_3060_18 |
crossvul-cpp_data_good_5350_0 | /*
* algif_hash: User-space interface for hash algorithms
*
* This file provides the user-space API for hash algorithms.
*
* Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/hash.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
/* Per-socket state for an AF_ALG "hash" socket. */
struct hash_ctx {
    struct af_alg_sgl sgl;               /* scatterlist for user data */
    u8 *result;                          /* digest output buffer */
    struct af_alg_completion completion; /* async op completion */
    unsigned int len;                    /* allocated size of this ctx */
    bool more;                           /* partial hash in progress */
    struct ahash_request req;            /* must stay last: followed by
                                            crypto_ahash_reqsize() bytes */
};
/*
 * sendmsg on a hash socket: feed the message data into the hash in
 * chunks of at most min(ALG_MAX_PAGES pages, sk_sndbuf). A fresh hash
 * is started unless a previous send left MSG_MORE set; the digest is
 * finalised into ctx->result unless this message sets MSG_MORE.
 * Returns bytes consumed, or a negative error.
 */
static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
            size_t ignored)
{
    int limit = ALG_MAX_PAGES * PAGE_SIZE;
    struct sock *sk = sock->sk;
    struct alg_sock *ask = alg_sk(sk);
    struct hash_ctx *ctx = ask->private;
    long copied = 0;
    int err;

    if (limit > sk->sk_sndbuf)
        limit = sk->sk_sndbuf;

    lock_sock(sk);
    if (!ctx->more) {
        err = crypto_ahash_init(&ctx->req);
        if (err)
            goto unlock;
    }

    ctx->more = 0;

    while (msg_data_left(msg)) {
        int len = msg_data_left(msg);

        if (len > limit)
            len = limit;

        len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len);
        if (len < 0) {
            /* report the error only if nothing was consumed yet */
            err = copied ? 0 : len;
            goto unlock;
        }

        ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len);

        err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req),
                         &ctx->completion);
        af_alg_free_sg(&ctx->sgl);
        if (err)
            goto unlock;

        copied += len;
        iov_iter_advance(&msg->msg_iter, len);
    }

    err = 0;

    ctx->more = msg->msg_flags & MSG_MORE;
    if (!ctx->more) {
        ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
        err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
                         &ctx->completion);
    }

unlock:
    release_sock(sk);

    return err ?: copied;
}
/*
 * sendpage on a hash socket: zero-copy variant of hash_sendmsg for a
 * single page. Without MSG_MORE the digest is produced immediately
 * (finup if a hash was in progress, digest otherwise); with MSG_MORE
 * the page is just folded into the running hash.
 */
static ssize_t hash_sendpage(struct socket *sock, struct page *page,
                 int offset, size_t size, int flags)
{
    struct sock *sk = sock->sk;
    struct alg_sock *ask = alg_sk(sk);
    struct hash_ctx *ctx = ask->private;
    int err;

    /* intermediate pages of a multi-page send imply MSG_MORE */
    if (flags & MSG_SENDPAGE_NOTLAST)
        flags |= MSG_MORE;

    lock_sock(sk);
    sg_init_table(ctx->sgl.sg, 1);
    sg_set_page(ctx->sgl.sg, page, size, offset);

    ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size);

    if (!(flags & MSG_MORE)) {
        if (ctx->more)
            err = crypto_ahash_finup(&ctx->req);
        else
            err = crypto_ahash_digest(&ctx->req);
    } else {
        if (!ctx->more) {
            err = crypto_ahash_init(&ctx->req);
            if (err)
                goto unlock;
        }

        err = crypto_ahash_update(&ctx->req);
    }

    err = af_alg_wait_for_completion(err, &ctx->completion);
    if (err)
        goto unlock;

    ctx->more = flags & MSG_MORE;

unlock:
    release_sock(sk);

    return err ?: size;
}
/*
 * recvmsg on a hash socket: return the digest to userspace. If a
 * partial hash is still open it is finalised first. Requests shorter
 * than the digest are truncated (MSG_TRUNC set).
 */
static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
            int flags)
{
    struct sock *sk = sock->sk;
    struct alg_sock *ask = alg_sk(sk);
    struct hash_ctx *ctx = ask->private;
    unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
    int err;

    if (len > ds)
        len = ds;
    else if (len < ds)
        msg->msg_flags |= MSG_TRUNC;

    lock_sock(sk);
    if (ctx->more) {
        /* finish the in-progress hash before handing out the digest */
        ctx->more = 0;
        ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
        err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req),
                         &ctx->completion);
        if (err)
            goto unlock;
    }

    err = memcpy_to_msg(msg, ctx->result, len);

unlock:
    release_sock(sk);

    return err ?: len;
}
/*
 * accept on a hash socket: clone the socket, carrying over any
 * in-progress hash state via export/import so the child can continue
 * the computation. State is exported only while `more' is set.
 */
static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
{
    struct sock *sk = sock->sk;
    struct alg_sock *ask = alg_sk(sk);
    struct hash_ctx *ctx = ask->private;
    struct ahash_request *req = &ctx->req;
    /* VLA sized by the algorithm's export-state size */
    char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
    struct sock *sk2;
    struct alg_sock *ask2;
    struct hash_ctx *ctx2;
    bool more;
    int err;

    lock_sock(sk);
    more = ctx->more;
    /* only export when a hash is actually in progress */
    err = more ? crypto_ahash_export(req, state) : 0;
    release_sock(sk);

    if (err)
        return err;

    err = af_alg_accept(ask->parent, newsock);
    if (err)
        return err;

    sk2 = newsock->sk;
    ask2 = alg_sk(sk2);
    ctx2 = ask2->private;
    ctx2->more = more;

    if (!more)
        return err;

    err = crypto_ahash_import(&ctx2->req, state);
    if (err) {
        /* import failed: tear down the freshly accepted socket */
        sock_orphan(sk2);
        sock_put(sk2);
    }

    return err;
}
/* Socket operations for AF_ALG hash sockets; everything except
   send/recv/accept/release is rejected via the sock_no_* stubs. */
static struct proto_ops algif_hash_ops = {
    .family     = PF_ALG,

    .connect    = sock_no_connect,
    .socketpair = sock_no_socketpair,
    .getname    = sock_no_getname,
    .ioctl      = sock_no_ioctl,
    .listen     = sock_no_listen,
    .shutdown   = sock_no_shutdown,
    .getsockopt = sock_no_getsockopt,
    .mmap       = sock_no_mmap,
    .bind       = sock_no_bind,
    .setsockopt = sock_no_setsockopt,
    .poll       = sock_no_poll,

    .release    = af_alg_release,
    .sendmsg    = hash_sendmsg,
    .sendpage   = hash_sendpage,
    .recvmsg    = hash_recvmsg,
    .accept     = hash_accept,
};
/* af_alg_type.bind: allocate the ahash transform for `name'. */
static void *hash_bind(const char *name, u32 type, u32 mask)
{
    return crypto_alloc_ahash(name, type, mask);
}

/* af_alg_type.release: free the transform allocated by hash_bind(). */
static void hash_release(void *private)
{
    crypto_free_ahash(private);
}

/* af_alg_type.setkey: forward the key to the ahash transform. */
static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
    return crypto_ahash_setkey(private, key, keylen);
}
/* sk_destruct: free the digest buffer (zeroed, it may hold sensitive
   output) and the context, then drop the parent reference. */
static void hash_sock_destruct(struct sock *sk)
{
    struct alg_sock *ask = alg_sk(sk);
    struct hash_ctx *ctx = ask->private;

    sock_kzfree_s(sk, ctx->result,
              crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)));
    sock_kfree_s(sk, ctx, ctx->len);
    af_alg_release_parent(sk);
}
/*
 * af_alg_type.accept: set up the per-socket hash context. The context
 * is allocated with trailing space for the ahash request, plus a
 * separate zeroed digest buffer.
 */
static int hash_accept_parent(void *private, struct sock *sk)
{
    struct hash_ctx *ctx;
    struct alg_sock *ask = alg_sk(sk);
    /* ctx + variable-size request area (see struct hash_ctx) */
    unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(private);
    unsigned ds = crypto_ahash_digestsize(private);

    ctx = sock_kmalloc(sk, len, GFP_KERNEL);
    if (!ctx)
        return -ENOMEM;

    ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
    if (!ctx->result) {
        sock_kfree_s(sk, ctx, len);
        return -ENOMEM;
    }

    memset(ctx->result, 0, ds);

    ctx->len = len;
    ctx->more = 0;
    af_alg_init_completion(&ctx->completion);

    ask->private = ctx;

    ahash_request_set_tfm(&ctx->req, private);
    ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                   af_alg_complete, &ctx->completion);

    sk->sk_destruct = hash_sock_destruct;

    return 0;
}
/* AF_ALG type descriptor registered for "hash" sockets. */
static const struct af_alg_type algif_type_hash = {
    .bind    = hash_bind,
    .release = hash_release,
    .setkey  = hash_setkey,
    .accept  = hash_accept_parent,
    .ops     = &algif_hash_ops,
    .name    = "hash",
    .owner   = THIS_MODULE
};
/* Module init: register the "hash" AF_ALG type. */
static int __init algif_hash_init(void)
{
    return af_alg_register_type(&algif_type_hash);
}

/* Module exit: unregister; failure here indicates a refcount bug. */
static void __exit algif_hash_exit(void)
{
    int err = af_alg_unregister_type(&algif_type_hash);
    BUG_ON(err);
}
/* Hook module load/unload to the register/unregister helpers above. */
module_init(algif_hash_init);
module_exit(algif_hash_exit);
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_5350_0 |
crossvul-cpp_data_good_3060_15 | /* Request a key from userspace
*
* Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* See Documentation/security/keys-request-key.txt
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/keyctl.h>
#include <linux/slab.h>
#include "internal.h"
#define key_negative_timeout 60 /* default timeout on a negative key's existence */
/*
* wait_on_bit() sleep function for uninterruptible waiting
*/
static int key_wait_bit(void *flags)
{
	/* Unconditionally sleep; returning 0 tells wait_on_bit() to keep
	 * waiting until the bit clears. */
	schedule();
	return 0;
}
/*
* wait_on_bit() sleep function for interruptible waiting
*/
static int key_wait_bit_intr(void *flags)
{
	schedule();
	/* A non-zero return aborts the wait_on_bit() loop on a signal. */
	return signal_pending(current) ? -ERESTARTSYS : 0;
}
/**
* complete_request_key - Complete the construction of a key.
* @cons: The key construction record.
* @error: The success or failute of the construction.
*
* Complete the attempt to construct a key. The key will be negated
* if an error is indicated. The authorisation key will be revoked
* unconditionally.
*/
void complete_request_key(struct key_construction *cons, int error)
{
	kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error);

	/* On failure, negatively instantiate the key for a while so that
	 * repeated lookups do not keep re-upcalling; on success just revoke
	 * the now-unneeded authorisation key. */
	if (error < 0)
		key_negate_and_link(cons->key, key_negative_timeout, NULL,
				    cons->authkey);
	else
		key_revoke(cons->authkey);

	/* Drop the references taken in construct_key() and free the
	 * construction record. */
	key_put(cons->key);
	key_put(cons->authkey);
	kfree(cons);
}
EXPORT_SYMBOL(complete_request_key);
/*
* Initialise a usermode helper that is going to have a specific session
* keyring.
*
* This is called in context of freshly forked kthread before kernel_execve(),
* so we can simply install the desired session_keyring at this point.
*/
/* Install the keyring stashed in info->data as the helper's session
 * keyring, before the usermode helper execs. */
static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
{
	return install_session_keyring_to_cred(cred, info->data);
}
/*
* Clean up a usermode helper with session keyring.
*/
/* Drop the session-keyring reference held on behalf of the helper. */
static void umh_keys_cleanup(struct subprocess_info *info)
{
	key_put(info->data);
}
/*
* Call a usermode helper with a specific session keyring.
*/
static int call_usermodehelper_keys(char *path, char **argv, char **envp,
					struct key *session_keyring, int wait)
{
	struct subprocess_info *info;

	info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
					 umh_keys_init, umh_keys_cleanup,
					 session_keyring);
	if (!info)
		return -ENOMEM;

	/* The reference taken here is dropped by umh_keys_cleanup(), which
	 * the usermode-helper core calls whether or not the exec succeeds. */
	key_get(session_keyring);
	return call_usermodehelper_exec(info, wait);
}
/*
* Request userspace finish the construction of a key
* - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
*/
static int call_sbin_request_key(struct key_construction *cons,
				 const char *op,
				 void *aux)
{
	const struct cred *cred = current_cred();
	key_serial_t prkey, sskey;
	struct key *key = cons->key, *authkey = cons->authkey, *keyring,
		*session;
	char *argv[9], *envp[3], uid_str[12], gid_str[12];
	char key_str[12], keyring_str[3][12];
	char desc[20];
	int ret, i;

	kenter("{%d},{%d},%s", key->serial, authkey->serial, op);

	/* The helper needs the caller's user keyrings to exist. */
	ret = install_user_keyrings();
	if (ret < 0)
		goto error_alloc;

	/* allocate a new session keyring */
	sprintf(desc, "_req.%u", key->serial);

	/* Pin the creds across keyring_alloc(); afterwards cred continues to
	 * be usable unreferenced as it is current's own credentials. */
	cred = get_current_cred();
	keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred,
				KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
				KEY_ALLOC_QUOTA_OVERRUN, NULL);
	put_cred(cred);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto error_alloc;
	}

	/* attach the auth key to the session keyring, so the helper can
	 * assume the caller's authority over the key under construction */
	ret = key_link(keyring, authkey);
	if (ret < 0)
		goto error_link;

	/* record the UID and GID */
	sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid));
	sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid));

	/* we say which key is under construction */
	sprintf(key_str, "%d", key->serial);

	/* we specify the process's default keyrings */
	sprintf(keyring_str[0], "%d",
		cred->thread_keyring ? cred->thread_keyring->serial : 0);

	prkey = 0;
	if (cred->process_keyring)
		prkey = cred->process_keyring->serial;
	sprintf(keyring_str[1], "%d", prkey);

	/* The session keyring is RCU-managed; fall back to the user session
	 * keyring if the process has none. */
	rcu_read_lock();
	session = rcu_dereference(cred->session_keyring);
	if (!session)
		session = cred->user->session_keyring;
	sskey = session->serial;
	rcu_read_unlock();

	sprintf(keyring_str[2], "%d", sskey);

	/* set up a minimal environment */
	i = 0;
	envp[i++] = "HOME=/";
	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
	envp[i] = NULL;

	/* set up the argument list */
	i = 0;
	argv[i++] = "/sbin/request-key";
	argv[i++] = (char *) op;
	argv[i++] = key_str;
	argv[i++] = uid_str;
	argv[i++] = gid_str;
	argv[i++] = keyring_str[0];
	argv[i++] = keyring_str[1];
	argv[i++] = keyring_str[2];
	argv[i] = NULL;

	/* do it: run the helper synchronously with our new session keyring */
	ret = call_usermodehelper_keys(argv[0], argv, envp, keyring,
				       UMH_WAIT_PROC);
	kdebug("usermode -> 0x%x", ret);
	if (ret >= 0) {
		/* ret is the exit/wait code */
		if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags) ||
		    key_validate(key) < 0)
			ret = -ENOKEY;
		else
			/* ignore any errors from userspace if the key was
			 * instantiated */
			ret = 0;
	}

error_link:
	key_put(keyring);

error_alloc:
	/* complete_request_key() drops the cons references and negates the
	 * key on error */
	complete_request_key(cons, ret);
	kleave(" = %d", ret);
	return ret;
}
/*
* Call out to userspace for key construction.
*
* Program failure is ignored in favour of key status.
*/
static int construct_key(struct key *key, const void *callout_info,
			 size_t callout_len, void *aux,
			 struct key *dest_keyring)
{
	struct key_construction *cons;
	request_key_actor_t actor;
	struct key *authkey;
	int ret;

	kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);

	/* The construction record is freed by complete_request_key(). */
	cons = kmalloc(sizeof(*cons), GFP_KERNEL);
	if (!cons)
		return -ENOMEM;

	/* allocate an authorisation key */
	authkey = request_key_auth_new(key, callout_info, callout_len,
				       dest_keyring);
	if (IS_ERR(authkey)) {
		kfree(cons);
		ret = PTR_ERR(authkey);
		authkey = NULL;
	} else {
		/* cons takes its own references; ours is dropped below */
		cons->authkey = key_get(authkey);
		cons->key = key_get(key);

		/* make the call, using the key type's own upcall if it
		 * provides one, else the /sbin/request-key helper */
		actor = call_sbin_request_key;
		if (key->type->request_key)
			actor = key->type->request_key;

		ret = actor(cons, "create", aux);

		/* check that the actor called complete_request_key() prior to
		 * returning an error */
		WARN_ON(ret < 0 &&
			!test_bit(KEY_FLAG_REVOKED, &authkey->flags));
		key_put(authkey);
	}

	kleave(" = %d", ret);
	return ret;
}
/*
* Get the appropriate destination keyring for the request.
*
* The keyring selected is returned with an extra reference upon it which the
* caller must release.
*/
static void construct_get_dest_keyring(struct key **_dest_keyring)
{
	struct request_key_auth *rka;
	const struct cred *cred = current_cred();
	struct key *dest_keyring = *_dest_keyring, *authkey;

	kenter("%p", dest_keyring);

	/* find the appropriate keyring */
	if (dest_keyring) {
		/* the caller supplied one */
		key_get(dest_keyring);
	} else {
		/* use a default keyring; falling through the cases until we
		 * find one that we actually have */
		switch (cred->jit_keyring) {
		case KEY_REQKEY_DEFL_DEFAULT:
		case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
			if (cred->request_key_auth) {
				authkey = cred->request_key_auth;
				down_read(&authkey->sem);
				rka = authkey->payload.data;
				if (!test_bit(KEY_FLAG_REVOKED,
					      &authkey->flags))
					dest_keyring =
						key_get(rka->dest_keyring);
				up_read(&authkey->sem);
				if (dest_keyring)
					break;
			}
			/* fall through */
		case KEY_REQKEY_DEFL_THREAD_KEYRING:
			dest_keyring = key_get(cred->thread_keyring);
			if (dest_keyring)
				break;
			/* fall through */
		case KEY_REQKEY_DEFL_PROCESS_KEYRING:
			dest_keyring = key_get(cred->process_keyring);
			if (dest_keyring)
				break;
			/* fall through */
		case KEY_REQKEY_DEFL_SESSION_KEYRING:
			rcu_read_lock();
			dest_keyring = key_get(
				rcu_dereference(cred->session_keyring));
			rcu_read_unlock();
			if (dest_keyring)
				break;
			/* fall through */
		case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
			/* the user session keyring always exists by now
			 * (install_user_keyrings() has run) */
			dest_keyring =
				key_get(cred->user->session_keyring);
			break;
		case KEY_REQKEY_DEFL_USER_KEYRING:
			dest_keyring = key_get(cred->user->uid_keyring);
			break;
		case KEY_REQKEY_DEFL_GROUP_KEYRING:
		default:
			BUG();
		}
	}

	*_dest_keyring = dest_keyring;
	kleave(" [dk %d]", key_serial(dest_keyring));
	return;
}
/*
* Allocate a new key in under-construction state and attempt to link it in to
* the requested keyring.
*
* May return a key that's already under construction instead if there was a
* race between two thread calling request_key().
*/
/*
 * Allocate a new key in under-construction state and attempt to link it in to
 * the requested keyring.
 *
 * Returns 0 with *_key set on success, -EINPROGRESS with *_key set if another
 * thread won the race and the key already exists, or a negative error code.
 *
 * Fix: the link_prealloc_failed path previously returned without dropping the
 * reference on the freshly allocated key, leaking it whenever
 * __key_link_begin() failed.
 */
static int construct_alloc_key(struct keyring_search_context *ctx,
			       struct key *dest_keyring,
			       unsigned long flags,
			       struct key_user *user,
			       struct key **_key)
{
	struct assoc_array_edit *edit;
	struct key *key;
	key_perm_t perm;
	key_ref_t key_ref;
	int ret;

	kenter("%s,%s,,,",
	       ctx->index_key.type->name, ctx->index_key.description);

	*_key = NULL;
	mutex_lock(&user->cons_lock);

	/* Grant the possessor read/write where the type supports it. */
	perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
	perm |= KEY_USR_VIEW;
	if (ctx->index_key.type->read)
		perm |= KEY_POS_READ;
	if (ctx->index_key.type == &key_type_keyring ||
	    ctx->index_key.type->update)
		perm |= KEY_POS_WRITE;

	key = key_alloc(ctx->index_key.type, ctx->index_key.description,
			ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred,
			perm, flags);
	if (IS_ERR(key))
		goto alloc_failed;

	set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);

	if (dest_keyring) {
		ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
		if (ret < 0)
			goto link_prealloc_failed;
	}

	/* attach the key to the destination keyring under lock, but we do need
	 * to do another check just in case someone beat us to it whilst we
	 * waited for locks */
	mutex_lock(&key_construction_mutex);

	key_ref = search_process_keyrings(ctx);
	if (!IS_ERR(key_ref))
		goto key_already_present;

	if (dest_keyring)
		__key_link(key, &edit);

	mutex_unlock(&key_construction_mutex);
	if (dest_keyring)
		__key_link_end(dest_keyring, &ctx->index_key, edit);
	mutex_unlock(&user->cons_lock);
	*_key = key;
	kleave(" = 0 [%d]", key_serial(key));
	return 0;

	/* the key is now present - we tell the caller that we found it by
	 * returning -EINPROGRESS  */
key_already_present:
	key_put(key);
	mutex_unlock(&key_construction_mutex);
	key = key_ref_to_ptr(key_ref);
	if (dest_keyring) {
		ret = __key_link_check_live_key(dest_keyring, key);
		if (ret == 0)
			__key_link(key, &edit);
		__key_link_end(dest_keyring, &ctx->index_key, edit);
		if (ret < 0)
			goto link_check_failed;
	}
	mutex_unlock(&user->cons_lock);
	*_key = key;
	kleave(" = -EINPROGRESS [%d]", key_serial(key));
	return -EINPROGRESS;

link_check_failed:
	mutex_unlock(&user->cons_lock);
	key_put(key);
	kleave(" = %d [linkcheck]", ret);
	return ret;

link_prealloc_failed:
	mutex_unlock(&user->cons_lock);
	key_put(key);		/* was leaked before this fix */
	kleave(" = %d [prelink]", ret);
	return ret;

alloc_failed:
	mutex_unlock(&user->cons_lock);
	kleave(" = %ld", PTR_ERR(key));
	return PTR_ERR(key);
}
/*
* Commence key construction.
*/
static struct key *construct_key_and_link(struct keyring_search_context *ctx,
					  const char *callout_info,
					  size_t callout_len,
					  void *aux,
					  struct key *dest_keyring,
					  unsigned long flags)
{
	struct key_user *user;
	struct key *key;
	int ret;

	kenter("");

	/* per-user record serialises concurrent constructions via cons_lock */
	user = key_user_lookup(current_fsuid());
	if (!user)
		return ERR_PTR(-ENOMEM);

	/* resolve NULL dest_keyring to the caller's default; takes a ref */
	construct_get_dest_keyring(&dest_keyring);

	ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
	key_user_put(user);

	if (ret == 0) {
		/* we allocated it: kick off the upcall */
		ret = construct_key(key, callout_info, callout_len, aux,
				    dest_keyring);
		if (ret < 0) {
			kdebug("cons failed");
			goto construction_failed;
		}
	} else if (ret == -EINPROGRESS) {
		/* someone else is constructing it; hand their key back */
		ret = 0;
	} else {
		goto couldnt_alloc_key;
	}

	key_put(dest_keyring);
	kleave(" = key %d", key_serial(key));
	return key;

construction_failed:
	key_negate_and_link(key, key_negative_timeout, NULL, NULL);
	key_put(key);
couldnt_alloc_key:
	key_put(dest_keyring);
	kleave(" = %d", ret);
	return ERR_PTR(ret);
}
/**
* request_key_and_link - Request a key and cache it in a keyring.
* @type: The type of key we want.
* @description: The searchable description of the key.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
* @callout_len: The length of callout_info.
* @aux: Auxiliary data for the upcall.
* @dest_keyring: Where to cache the key.
* @flags: Flags to key_alloc().
*
* A key matching the specified criteria is searched for in the process's
* keyrings and returned with its usage count incremented if found. Otherwise,
* if callout_info is not NULL, a key will be allocated and some service
* (probably in userspace) will be asked to instantiate it.
*
* If successfully found or created, the key will be linked to the destination
* keyring if one is provided.
*
* Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED
* or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was
* found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT
* if insufficient key quota was available to create a new key; or -ENOMEM if
* insufficient memory was available.
*
* If the returned key was created, then it may still be under construction,
* and wait_for_key_construction() should be used to wait for that to complete.
*/
struct key *request_key_and_link(struct key_type *type,
				 const char *description,
				 const void *callout_info,
				 size_t callout_len,
				 void *aux,
				 struct key *dest_keyring,
				 unsigned long flags)
{
	struct keyring_search_context ctx = {
		.index_key.type		= type,
		.index_key.description	= description,
		.cred			= current_cred(),
		.match_data.cmp		= key_default_cmp,
		.match_data.raw_data	= description,
		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
	};
	struct key *key;
	key_ref_t key_ref;
	int ret;

	kenter("%s,%s,%p,%zu,%p,%p,%lx",
	       ctx.index_key.type->name, ctx.index_key.description,
	       callout_info, callout_len, aux, dest_keyring, flags);

	/* let the key type preprocess the match criteria if it wants to */
	if (type->match_preparse) {
		ret = type->match_preparse(&ctx.match_data);
		if (ret < 0) {
			key = ERR_PTR(ret);
			goto error;
		}
	}

	/* search all the process keyrings for a key */
	key_ref = search_process_keyrings(&ctx);

	if (!IS_ERR(key_ref)) {
		/* found: optionally cache it in the destination keyring */
		key = key_ref_to_ptr(key_ref);
		if (dest_keyring) {
			construct_get_dest_keyring(&dest_keyring);
			ret = key_link(dest_keyring, key);
			key_put(dest_keyring);
			if (ret < 0) {
				key_put(key);
				key = ERR_PTR(ret);
				goto error_free;
			}
		}
	} else if (PTR_ERR(key_ref) != -EAGAIN) {
		key = ERR_CAST(key_ref);
	} else {
		/* the search failed, but the keyrings were searchable, so we
		 * should consult userspace if we can */
		key = ERR_PTR(-ENOKEY);
		if (!callout_info)
			goto error_free;

		key = construct_key_and_link(&ctx, callout_info, callout_len,
					     aux, dest_keyring, flags);
	}

error_free:
	if (type->match_free)
		type->match_free(&ctx.match_data);
error:
	kleave(" = %p", key);
	return key;
}
/**
* wait_for_key_construction - Wait for construction of a key to complete
* @key: The key being waited for.
* @intr: Whether to wait interruptibly.
*
* Wait for a key to finish being constructed.
*
* Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY
* if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was
* revoked or expired.
*/
int wait_for_key_construction(struct key *key, bool intr)
{
	int ret;

	/* Sleep until KEY_FLAG_USER_CONSTRUCT clears, using the appropriate
	 * interruptible/uninterruptible sleep helper. */
	ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
			  intr ? key_wait_bit_intr : key_wait_bit,
			  intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
	if (ret < 0)
		return ret;
	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
		/* pair with the barrier in the negative-instantiation path so
		 * the stored error is visible after the flag is seen */
		smp_rmb();
		return key->type_data.reject_error;
	}
	return key_validate(key);
}
EXPORT_SYMBOL(wait_for_key_construction);
/**
* request_key - Request a key and wait for construction
* @type: Type of key.
* @description: The searchable description of the key.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
*
* As for request_key_and_link() except that it does not add the returned key
* to a keyring if found, new keys are always allocated in the user's quota,
* the callout_info must be a NUL-terminated string and no auxiliary data can
* be passed.
*
* Furthermore, it then works as wait_for_key_construction() to wait for the
* completion of keys undergoing construction with a non-interruptible wait.
*/
struct key *request_key(struct key_type *type,
			const char *description,
			const char *callout_info)
{
	size_t callout_len = callout_info ? strlen(callout_info) : 0;
	struct key *key;
	int err;

	key = request_key_and_link(type, description, callout_info, callout_len,
				   NULL, NULL, KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(key))
		return key;

	/* Block uninterruptibly until the key is instantiated or negated. */
	err = wait_for_key_construction(key, false);
	if (err < 0) {
		key_put(key);
		return ERR_PTR(err);
	}

	return key;
}
EXPORT_SYMBOL(request_key);
/**
* request_key_with_auxdata - Request a key with auxiliary data for the upcaller
* @type: The type of key we want.
* @description: The searchable description of the key.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
* @callout_len: The length of callout_info.
* @aux: Auxiliary data for the upcall.
*
* As for request_key_and_link() except that it does not add the returned key
* to a keyring if found and new keys are always allocated in the user's quota.
*
* Furthermore, it then works as wait_for_key_construction() to wait for the
* completion of keys undergoing construction with a non-interruptible wait.
*/
struct key *request_key_with_auxdata(struct key_type *type,
				     const char *description,
				     const void *callout_info,
				     size_t callout_len,
				     void *aux)
{
	struct key *key;
	int err;

	key = request_key_and_link(type, description, callout_info, callout_len,
				   aux, NULL, KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(key))
		return key;

	/* Block uninterruptibly until the key is instantiated or negated. */
	err = wait_for_key_construction(key, false);
	if (err < 0) {
		key_put(key);
		return ERR_PTR(err);
	}

	return key;
}
EXPORT_SYMBOL(request_key_with_auxdata);
/*
* request_key_async - Request a key (allow async construction)
* @type: Type of key.
* @description: The searchable description of the key.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
* @callout_len: The length of callout_info.
*
* As for request_key_and_link() except that it does not add the returned key
* to a keyring if found, new keys are always allocated in the user's quota and
* no auxiliary data can be passed.
*
* The caller should call wait_for_key_construction() to wait for the
* completion of the returned key if it is still undergoing construction.
*/
struct key *request_key_async(struct key_type *type,
			      const char *description,
			      const void *callout_info,
			      size_t callout_len)
{
	/* Fire-and-forget variant: no destination keyring, no aux data, and
	 * no waiting - the caller uses wait_for_key_construction() itself. */
	return request_key_and_link(type, description, callout_info,
				    callout_len, NULL, NULL,
				    KEY_ALLOC_IN_QUOTA);
}
EXPORT_SYMBOL(request_key_async);
/*
* request a key with auxiliary data for the upcaller (allow async construction)
* @type: Type of key.
* @description: The searchable description of the key.
* @callout_info: The data to pass to the instantiation upcall (or NULL).
* @callout_len: The length of callout_info.
* @aux: Auxiliary data for the upcall.
*
* As for request_key_and_link() except that it does not add the returned key
* to a keyring if found and new keys are always allocated in the user's quota.
*
* The caller should call wait_for_key_construction() to wait for the
* completion of the returned key if it is still undergoing construction.
*/
struct key *request_key_async_with_auxdata(struct key_type *type,
					   const char *description,
					   const void *callout_info,
					   size_t callout_len,
					   void *aux)
{
	/* As request_key_async(), but passes @aux through to the upcall. */
	return request_key_and_link(type, description, callout_info,
				    callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA);
}
EXPORT_SYMBOL(request_key_async_with_auxdata);
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_3060_15 |
crossvul-cpp_data_good_5715_1 | /**
* FreeRDP: A Remote Desktop Protocol Implementation
* RDP Server Peer
*
* Copyright 2011 Vic Lee
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <winpr/crt.h>
#include "certificate.h"
#include <freerdp/utils/tcp.h>
#include "peer.h"
/**
 * Prepare the peer's RDP stack for an incoming connection.
 *
 * Fix: key_new() can fail (missing/unreadable/invalid key file); the
 * original stored its NULL result unchecked, leading to a NULL pointer
 * dereference later when the server key is used. Now reports failure.
 *
 * @return TRUE on success, FALSE if the RSA key file could not be loaded.
 */
static BOOL freerdp_peer_initialize(freerdp_peer* client)
{
	rdpSettings* settings = client->context->rdp->settings;

	settings->ServerMode = TRUE;
	settings->FrameAcknowledge = 0;
	settings->LocalConnection = client->local;
	client->context->rdp->state = CONNECTION_STATE_INITIAL;

	if (settings->RdpKeyFile != NULL)
	{
		settings->RdpServerRsaKey = key_new(settings->RdpKeyFile);

		if (!settings->RdpServerRsaKey)
		{
			fprintf(stderr, "%s: invalid RDP key file %s\n",
				__FUNCTION__, settings->RdpKeyFile);
			return FALSE;
		}
	}

	return TRUE;
}
/* Append the peer's transport socket descriptor to the caller's fd array
 * so it can be select()ed/poll()ed on; always succeeds. */
static BOOL freerdp_peer_get_fds(freerdp_peer* client, void** rfds, int* rcount)
{
	rfds[*rcount] = (void*)(long)(client->context->rdp->transport->TcpIn->sockfd);
	(*rcount)++;

	return TRUE;
}
/* Pump pending data through the RDP state machine; FALSE means the
 * connection is broken and should be torn down. */
static BOOL freerdp_peer_check_fds(freerdp_peer* client)
{
	return (rdp_check_fds(client->context->rdp) < 0) ? FALSE : TRUE;
}
/*
 * Dispatch one Share Data PDU received from the client.
 * Returns FALSE on malformed input or when the connection must be dropped.
 */
static BOOL peer_recv_data_pdu(freerdp_peer* client, wStream* s)
{
	BYTE type;
	UINT16 length;
	UINT32 share_id;
	BYTE compressed_type;
	UINT16 compressed_len;

	if (!rdp_read_share_data_header(s, &length, &type, &share_id, &compressed_type, &compressed_len))
		return FALSE;

	switch (type)
	{
		case DATA_PDU_TYPE_SYNCHRONIZE:
			if (!rdp_recv_client_synchronize_pdu(client->context->rdp, s))
				return FALSE;
			break;

		case DATA_PDU_TYPE_CONTROL:
			if (!rdp_server_accept_client_control_pdu(client->context->rdp, s))
				return FALSE;
			break;

		case DATA_PDU_TYPE_INPUT:
			if (!input_recv(client->context->rdp->input, s))
				return FALSE;
			break;

		case DATA_PDU_TYPE_BITMAP_CACHE_PERSISTENT_LIST:
			/* TODO: notify server bitmap cache data */
			break;

		case DATA_PDU_TYPE_FONT_LIST:
			/* the font list PDU completes the (re)activation sequence */
			if (!rdp_server_accept_client_font_list_pdu(client->context->rdp, s))
				return FALSE;

			if (!client->connected)
			{
				/**
				 * PostConnect should only be called once and should not be called
				 * after a reactivation sequence.
				 */
				IFCALLRET(client->PostConnect, client->connected, client);

				if (!client->connected)
					return FALSE;
			}

			if (!client->activated)
			{
				/* Activate will be called everytime after the client is activated/reactivated. */
				IFCALLRET(client->Activate, client->activated, client);

				if (!client->activated)
					return FALSE;
			}
			break;

		case DATA_PDU_TYPE_SHUTDOWN_REQUEST:
			/* client requested disconnection; FALSE tears the session down */
			mcs_send_disconnect_provider_ultimatum(client->context->rdp->mcs);
			return FALSE;

		case DATA_PDU_TYPE_FRAME_ACKNOWLEDGE:
			/* bounds-check before reading the 32-bit frame id */
			if (Stream_GetRemainingLength(s) < 4)
				return FALSE;
			Stream_Read_UINT32(s, client->ack_frame_id);
			IFCALL(client->update->SurfaceFrameAcknowledge, client->update->context, client->ack_frame_id);
			break;

		case DATA_PDU_TYPE_REFRESH_RECT:
			if (!update_read_refresh_rect(client->update, s))
				return FALSE;
			break;

		case DATA_PDU_TYPE_SUPPRESS_OUTPUT:
			if (!update_read_suppress_output(client->update, s))
				return FALSE;
			break;

		default:
			/* unknown data PDU types are logged but tolerated */
			fprintf(stderr, "Data PDU type %d\n", type);
			break;
	}

	return TRUE;
}
/*
 * Process one TPKT-framed (slow-path) PDU from the client.
 * Returns 0 on success, -1 on any protocol error.
 */
static int peer_recv_tpkt_pdu(freerdp_peer* client, wStream* s)
{
	rdpRdp* rdp;
	UINT16 length;
	UINT16 pduType;
	UINT16 pduLength;
	UINT16 pduSource;
	UINT16 channelId;
	UINT16 securityFlags;

	rdp = client->context->rdp;

	if (!rdp_read_header(rdp, s, &length, &channelId))
	{
		fprintf(stderr, "Incorrect RDP header.\n");
		return -1;
	}

	/* NOTE(review): DisableEncryption here gates standard RDP security,
	 * matching its (confusing) meaning elsewhere in this code base. */
	if (rdp->settings->DisableEncryption)
	{
		if (!rdp_read_security_header(s, &securityFlags))
			return -1;

		if (securityFlags & SEC_ENCRYPT)
		{
			/* length - 4: the security header was already consumed */
			if (!rdp_decrypt(rdp, s, length - 4, securityFlags))
			{
				fprintf(stderr, "rdp_decrypt failed\n");
				return -1;
			}
		}
	}

	if (channelId != MCS_GLOBAL_CHANNEL_ID)
	{
		/* static virtual channel traffic */
		if (!freerdp_channel_peer_process(client, s, channelId))
			return -1;
	}
	else
	{
		if (!rdp_read_share_control_header(s, &pduLength, &pduType, &pduSource))
			return -1;

		client->settings->PduSource = pduSource;

		switch (pduType)
		{
			case PDU_TYPE_DATA:
				if (!peer_recv_data_pdu(client, s))
					return -1;
				break;

			default:
				fprintf(stderr, "Client sent pduType %d\n", pduType);
				return -1;
		}
	}

	return 0;
}
/*
 * Process one fast-path input PDU from the client.
 * Returns the result of fastpath_recv_inputs(), or -1 on error.
 */
static int peer_recv_fastpath_pdu(freerdp_peer* client, wStream* s)
{
	rdpRdp* rdp;
	UINT16 length;
	rdpFastPath* fastpath;

	rdp = client->context->rdp;
	fastpath = rdp->fastpath;

	/* NOTE(review): the return value of fastpath_read_header_rdp() is
	 * deliberately ignored; the explicit length sanity check below
	 * rejects malformed headers instead. */
	fastpath_read_header_rdp(fastpath, s, &length);

	if ((length == 0) || (length > Stream_GetRemainingLength(s)))
	{
		fprintf(stderr, "incorrect FastPath PDU header length %d\n", length);
		return -1;
	}

	if (fastpath->encryptionFlags & FASTPATH_OUTPUT_ENCRYPTED)
	{
		if (!rdp_decrypt(rdp, s, length, (fastpath->encryptionFlags & FASTPATH_OUTPUT_SECURE_CHECKSUM) ? SEC_SECURE_CHECKSUM : 0))
			return -1;
	}

	return fastpath_recv_inputs(fastpath, s);
}
/* Route an incoming PDU to the slow-path (TPKT) or fast-path parser,
 * depending on the framing detected in the first bytes. */
static int peer_recv_pdu(freerdp_peer* client, wStream* s)
{
	return tpkt_verify_header(s) ? peer_recv_tpkt_pdu(client, s)
				     : peer_recv_fastpath_pdu(client, s);
}
/*
 * Transport receive callback: drives the server-side RDP connection
 * state machine, one inbound packet at a time.
 * Returns 0 to continue, -1 to abort the connection.
 */
static int peer_recv_callback(rdpTransport* transport, wStream* s, void* extra)
{
	freerdp_peer* client = (freerdp_peer*) extra;
	rdpRdp* rdp = client->context->rdp;

	switch (rdp->state)
	{
		case CONNECTION_STATE_INITIAL:
			if (!rdp_server_accept_nego(rdp, s))
				return -1;

			if (rdp->nego->selected_protocol & PROTOCOL_NLA)
			{
				/* NLA already authenticated the user via CredSSP;
				 * hand the captured identity to the Logon callback
				 * and release the CredSSP context. */
				sspi_CopyAuthIdentity(&client->identity, &(rdp->nego->transport->credssp->identity));
				IFCALLRET(client->Logon, client->authenticated, client, &client->identity, TRUE);
				credssp_free(rdp->nego->transport->credssp);
				rdp->nego->transport->credssp = NULL;
			}
			else
			{
				IFCALLRET(client->Logon, client->authenticated, client, &client->identity, FALSE);
			}
			break;

		case CONNECTION_STATE_NEGO:
			if (!rdp_server_accept_mcs_connect_initial(rdp, s))
				return -1;
			break;

		case CONNECTION_STATE_MCS_CONNECT:
			if (!rdp_server_accept_mcs_erect_domain_request(rdp, s))
				return -1;
			break;

		case CONNECTION_STATE_MCS_ERECT_DOMAIN:
			if (!rdp_server_accept_mcs_attach_user_request(rdp, s))
				return -1;
			break;

		case CONNECTION_STATE_MCS_ATTACH_USER:
			if (!rdp_server_accept_mcs_channel_join_request(rdp, s))
				return -1;
			break;

		case CONNECTION_STATE_MCS_CHANNEL_JOIN:
			if (rdp->settings->DisableEncryption)
			{
				/* standard RDP security: exchange client keys first */
				if (!rdp_server_accept_client_keys(rdp, s))
					return -1;
				break;
			}
			rdp->state = CONNECTION_STATE_ESTABLISH_KEYS;
			/* FALLTHROUGH */

		case CONNECTION_STATE_ESTABLISH_KEYS:
			if (!rdp_server_accept_client_info(rdp, s))
				return -1;

			IFCALL(client->Capabilities, client);

			if (!rdp_send_demand_active(rdp))
				return -1;
			break;

		case CONNECTION_STATE_LICENSE:
			if (!rdp_server_accept_confirm_active(rdp, s))
			{
				/**
				 * During reactivation sequence the client might sent some input or channel data
				 * before receiving the Deactivate All PDU. We need to process them as usual.
				 */
				Stream_SetPosition(s, 0);
				return peer_recv_pdu(client, s);
			}
			break;

		case CONNECTION_STATE_ACTIVE:
			if (peer_recv_pdu(client, s) < 0)
				return -1;
			break;

		default:
			fprintf(stderr, "Invalid state %d\n", rdp->state);
			return -1;
	}

	return 0;
}
/**
 * [MS-RDPBCGR] 1.3.1.4.2 User-Initiated Disconnection Sequence on Server:
 * send the client a Deactivate All PDU followed by an optional MCS
 * Disconnect Provider Ultimatum PDU.
 */
static BOOL freerdp_peer_close(freerdp_peer* client)
{
	rdpRdp* rdp = client->context->rdp;

	if (rdp_send_deactivate_all(rdp))
		return mcs_send_disconnect_provider_ultimatum(rdp->mcs);

	return FALSE;
}
/* Forcibly drop the underlying transport connection. */
static void freerdp_peer_disconnect(freerdp_peer* client)
{
	transport_disconnect(client->context->rdp->transport);
}
/* Send a blob of virtual channel data to the client on channelId. */
static int freerdp_peer_send_channel_data(freerdp_peer* client, int channelId, BYTE* data, int size)
{
	return rdp_send_channel_data(client->context->rdp, channelId, data, size);
}
/**
 * Allocate and wire up the server-side peer context.
 *
 * Fix: the original dereferenced the results of rdp_new() and malloc()
 * without checking them, crashing on allocation failure (CWE-476). On
 * failure client->context is now left NULL; callers must check it before
 * using the peer.
 */
void freerdp_peer_context_new(freerdp_peer* client)
{
	rdpRdp* rdp;

	rdp = rdp_new(NULL);

	if (!rdp)
		return; /* client->context stays NULL to signal failure */

	client->input = rdp->input;
	client->update = rdp->update;
	client->settings = rdp->settings;

	client->context = (rdpContext*) malloc(client->ContextSize);

	if (!client->context)
	{
		client->input = NULL;
		client->update = NULL;
		client->settings = NULL;
		rdp_free(rdp);
		return;
	}

	ZeroMemory(client->context, client->ContextSize);

	client->context->rdp = rdp;
	client->context->peer = client;
	client->context->input = client->input;
	client->context->update = client->update;
	client->context->settings = client->settings;

	client->update->context = client->context;
	client->input->context = client->context;

	update_register_server_callbacks(client->update);

	transport_attach(rdp->transport, client->sockfd);

	rdp->transport->ReceiveCallback = peer_recv_callback;
	rdp->transport->ReceiveExtra = client;
	transport_set_blocking_mode(rdp->transport, FALSE);

	IFCALL(client->ContextNew, client, client->context);
}
/* Invoke the application's context teardown hook, if registered. */
void freerdp_peer_context_free(freerdp_peer* client)
{
	IFCALL(client->ContextFree, client, client->context);
}
/**
 * Allocate a new server-side peer for an accepted socket.
 *
 * Fix: the original called ZeroMemory(client, ...) BEFORE checking the
 * malloc() result, dereferencing NULL on allocation failure and making
 * the later `client != NULL` check dead code (CWE-476).
 *
 * @return the new peer, or NULL on allocation failure.
 */
freerdp_peer* freerdp_peer_new(int sockfd)
{
	freerdp_peer* client;

	client = (freerdp_peer*) malloc(sizeof(freerdp_peer));

	if (!client)
		return NULL;

	ZeroMemory(client, sizeof(freerdp_peer));

	freerdp_tcp_set_no_delay(sockfd, TRUE);

	client->sockfd = sockfd;
	client->ContextSize = sizeof(rdpContext);
	client->Initialize = freerdp_peer_initialize;
	client->GetFileDescriptor = freerdp_peer_get_fds;
	client->CheckFileDescriptor = freerdp_peer_check_fds;
	client->Close = freerdp_peer_close;
	client->Disconnect = freerdp_peer_disconnect;
	client->SendChannelData = freerdp_peer_send_channel_data;

	return client;
}
/**
 * Free a peer and its context.
 *
 * Fix: the original dereferenced client->context unconditionally, crashing
 * when freerdp_peer_context_new() had failed and left context NULL.
 */
void freerdp_peer_free(freerdp_peer* client)
{
	if (!client)
		return;

	if (client->context)
	{
		rdp_free(client->context->rdp);
		free(client->context);
	}

	free(client);
}
crossvul-cpp_data_good_1863_0 | /*
* algif_skcipher: User-space interface for skcipher algorithms
*
* This file provides the user-space API for symmetric key ciphers.
*
* Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
/* One chunk of the transmit scatterlist chain; sg[cur] is the next free
 * slot and the final slot is reserved for chaining to the next chunk. */
struct skcipher_sg_list {
	struct list_head list;
	int cur;
	struct scatterlist sg[0];
};

struct skcipher_tfm {
	struct crypto_skcipher *skcipher;
	bool has_key;	/* set once setkey() has succeeded */
};

/* Per-socket cipher state, stored in alg_sock.private. */
struct skcipher_ctx {
	struct list_head tsgl;		/* chain of skcipher_sg_list tx chunks */
	struct af_alg_sgl rsgl;
	void *iv;
	struct af_alg_completion completion;
	atomic_t inflight;		/* outstanding async requests */
	size_t used;			/* bytes currently queued in tsgl */
	unsigned int len;		/* allocation size of this ctx */
	bool more;			/* more data expected (MSG_MORE) */
	bool merge;			/* may append to the last sg entry */
	bool enc;			/* encrypt (true) or decrypt (false) */
	struct skcipher_request req;	/* must be last: req area follows */
};

struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

/* Book-keeping appended after an async skcipher_request allocation. */
struct skcipher_async_req {
	struct kiocb *iocb;
	struct skcipher_async_rsgl first_sgl;	/* first rsgl kept inline */
	struct list_head list;
	struct scatterlist *tsg;
	char iv[];				/* per-request IV copy */
};

/* The skcipher_async_req sits directly after the request structure. */
#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
	crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))

#define GET_REQ_SIZE(ctx) \
	crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))

#define GET_IV_SIZE(ctx) \
	crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))

/* Entries per skcipher_sg_list so one chunk fits in a single page
 * (one scatterlist slot is reserved for chaining). */
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)
/* Release all scatterlists attached to an async request: the rx side
 * (rsgl list) and the tx side (sreq->tsg page references). */
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		/* first_sgl is embedded in sreq and must not be kfree'd */
		if (rsgl != &sreq->first_sgl)
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	for_each_sg(sgl, sg, n, i)
		put_page(sg_page(sg));

	kfree(sreq->tsg);
}
/* Completion callback for async cipher requests: free everything the
 * request pinned and report the result to the aio submitter. */
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct sock *sk = req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(&ctx->inflight);
	skcipher_free_async_sgls(sreq);
	kfree(req);
	iocb->ki_complete(iocb, err, err);
}
/* Remaining send-buffer space in bytes, rounded to whole pages and
 * clamped so a shrunken sk_sndbuf never yields a negative value. */
static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}
/* A send can make progress once at least one full page of budget is free. */
static inline bool skcipher_writable(struct sock *sk)
{
	return skcipher_sndbuf(sk) >= PAGE_SIZE;
}
/*
 * Ensure the tail of the TX queue has a free scatterlist slot, allocating
 * and chaining a new skcipher_sg_list chunk when the last one is full
 * (or when the queue is empty).  Returns 0 or -ENOMEM.
 */
static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		/* +1 entry reserved for chaining to the next chunk */
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		/* link the previous chunk's spare slot to the new one */
		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}
/*
 * Consume @used bytes from the front of the TX queue, trimming partially
 * consumed sg entries in place and freeing fully drained chunks.
 * @put: when non-zero, drop the page reference of each drained entry
 *       (the sync path owns the pages; the async path keeps them).
 */
static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);

			/* already drained on an earlier pass */
			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			/* entry only partially consumed: stop here */
			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		/* whole chunk drained: unlink and free it */
		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}
static void skcipher_free_sgl(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
skcipher_pull_sgl(sk, ctx->used, 1);
}
/*
 * Sleep until the socket becomes writable (a full page of send budget is
 * free).  Returns 0 on success, -EAGAIN for non-blocking callers, or
 * -ERESTARTSYS when interrupted by a signal.
 */
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	return err;
}
/*
 * Wake readers/pollers once write space is available again (called after
 * the TX queue shrinks).  No-op while the socket is still full.
 */
static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
/*
 * Sleep until TX data has been queued by a sender (ctx->used != 0).
 * Returns 0 on success, -EAGAIN for non-blocking callers, or
 * -ERESTARTSYS when interrupted by a signal.
 */
static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT) {
		return -EAGAIN;
	}

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}
/*
 * Wake waiters blocked in skcipher_wait_for_data() once TX data is
 * queued.  No-op while the queue is empty.
 */
static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
/*
 * sendmsg() handler: parse the optional ALG control message (operation
 * and IV), then copy the payload into freshly allocated pages queued on
 * ctx->tsgl.  Returns the number of bytes queued, or a negative errno
 * when nothing was copied.
 */
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	/* a previous request is still pending consumption: refuse new data */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		size_t plen;

		/* append to the partially filled last page if possible */
		if (ctx->merge) {
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			/* still mergeable unless the page filled up exactly */
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		sg_unmark_end(sg + sgl->cur);

		/* fill page-sized entries until len or the chunk is exhausted */
		do {
			i = sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
/*
 * sendpage() handler: queue the caller's page zero-copy by taking an
 * extra reference and appending it to the TX scatterlist.  Returns the
 * queued size, or a negative errno.
 */
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	/* previous request still pending consumption: refuse new data */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	/* a foreign page is never merged into */
	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
/* Count the scatterlist entries still carrying data across the TX queue. */
static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
	struct skcipher_sg_list *chunk;
	int total = 0;

	list_for_each_entry(chunk, &ctx->tsgl, list) {
		struct scatterlist *first = chunk->sg;

		/* step past entries already drained by earlier reads */
		for (; !first->length; first++)
			;
		total += sg_nents(first);
	}
	return total;
}
/*
 * Asynchronous recvmsg(): build a standalone skcipher request that takes
 * over the queued TX pages, map the caller's iov as the RX side, and
 * submit.  On -EINPROGRESS ownership passes to skcipher_async_cb();
 * -EIOCBQUEUED is returned to the caller.
 *
 * Fix vs. original: when growing the TX slot array fails mid-loop, err
 * could still hold a stale non-negative value from the previous
 * iteration (err = used), so a positive "error" escaped to userspace.
 * err is now explicitly set to -ENOMEM on that path.
 */
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct skcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
	unsigned int reqlen = sizeof(struct skcipher_async_req) +
				GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
	int err = -ENOMEM;
	bool mark = false;

	lock_sock(sk);
	req = kmalloc(reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	sreq = GET_SREQ(req, ctx);
	sreq->iocb = msg->msg_iocb;
	memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
	INIT_LIST_HEAD(&sreq->list);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(!sreq->tsg)) {
		kfree(req);
		goto unlock;
	}
	sg_init_table(sreq->tsg, tx_nents);
	memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
	skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      skcipher_async_cb, sk);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		/* wait for queued data before touching the TX list */
		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		/* skip entries already drained by earlier reads */
		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;

			/* Ran out of tx slots in async request
			 * need to expand */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp) {
				/* err may hold a stale >= 0 value here */
				err = -ENOMEM;
				goto free;
			}

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
			mark = true;
		}
		/* Need to take over the tx sgl from ctx
		 * to the asynch req - these sgls will be freed later */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	if (mark)
		sg_mark_end(sreq->tsg + txbufs - 1);

	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				   len, sreq->iv);
	err = ctx->enc ? crypto_skcipher_encrypt(req) :
			 crypto_skcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
	kfree(req);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	return err;
}
/*
 * Synchronous recvmsg(): run the cipher over the queued TX buffers,
 * copying the result into the caller's iov.  Returns the number of
 * bytes processed, or a negative errno when nothing was copied.
 *
 * Fix vs. original: the TX scatterlist head was dereferenced
 * (list_first_entry() + the !sg->length skip loop) BEFORE checking
 * ctx->used.  With an empty ctx->tsgl that reads past the list head.
 * The list is now loaded only after data is known to be queued
 * (upstream commit 4f0414e54e4d, "crypto: algif_skcipher - Load TX SG
 * list after waiting").
 */
static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
		&ctx->req));
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		/* wait for queued data before touching the TX list */
		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		/* skip entries already drained by earlier reads */
		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used, msg_data_left(msg));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		/* process whole blocks only, unless this drains the queue */
		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
					   ctx->iv);

		err = af_alg_wait_for_completion(
				ctx->enc ?
					crypto_skcipher_encrypt(&ctx->req) :
					crypto_skcipher_decrypt(&ctx->req),
				&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
/* Dispatch recvmsg() to the async path for aio callers, sync otherwise. */
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb))
		return skcipher_recvmsg_async(sock, msg, flags);

	return skcipher_recvmsg_sync(sock, msg, flags);
}
/*
 * poll() handler: readable while TX data is queued for processing,
 * writable while a full page of send budget remains.
 */
static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (ctx->used)
		mask |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return mask;
}
/* proto_ops of an accepted skcipher socket: only the data path
 * (sendmsg/sendpage/recvmsg/poll) and release do real work, the
 * rest are sock_no_* stubs. */
static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};
/* bind() callback: allocate the per-tfm wrapper and the named transform. */
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	struct crypto_skcipher *cipher;
	struct skcipher_tfm *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	cipher = crypto_alloc_skcipher(name, type, mask);
	if (IS_ERR(cipher)) {
		kfree(priv);
		return ERR_CAST(cipher);
	}

	priv->skcipher = cipher;
	return priv;
}
/* release() callback: tear down what skcipher_bind() allocated. */
static void skcipher_release(void *private)
{
	struct skcipher_tfm *priv = private;

	crypto_free_skcipher(priv->skcipher);
	kfree(priv);
}
/* setkey() callback: program the key and remember whether it took. */
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	struct skcipher_tfm *priv = private;
	int ret = crypto_skcipher_setkey(priv->skcipher, key, keylen);

	priv->has_key = (ret == 0);
	return ret;
}
static void skcipher_wait(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
int ctr = 0;
while (atomic_read(&ctx->inflight) && ctr++ < 100)
msleep(100);
}
/*
 * Socket destructor: wait for in-flight async requests, then free the
 * TX queue, the IV (zeroized) and the ctx, and drop the parent ref.
 */
static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

	/* async completions still reference ctx; give them time to finish */
	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}
/*
 * accept() callback: allocate the per-socket ctx (followed in the same
 * allocation by the transform's request context) and wire up the request.
 */
static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_tfm *tfm = private;
	struct crypto_skcipher *skcipher = tfm->skcipher;
	unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(skcipher);

	/* refuse sockets on an unkeyed transform; NOTE(review): appears to
	 * match the upstream "require setkey before accept(2)" hardening --
	 * using an unkeyed tfm would crash later in the cipher. */
	if (!tfm->has_key)
		return -ENOKEY;

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(skcipher),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	memset(ctx->iv, 0, crypto_skcipher_ivsize(skcipher));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	skcipher_request_set_tfm(&ctx->req, skcipher);
	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}
/* af_alg type registration: exposes this module as "skcipher" to AF_ALG. */
static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.ops		=	&algif_skcipher_ops,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};
/* Module entry: register the "skcipher" AF_ALG type. */
static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

/* Module exit: unregistration can only fail on a programming error. */
static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);
	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_1863_0 |
/*
* fs/f2fs/segment.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>
/* Find-first-zero in the byte-reversed bitmap representation. */
#define __reverse_ffz(x) __reverse_ffs(~(x))

/* Slab caches for the segment manager's small, frequently-used objects. */
static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;
/* Assemble one word from its bytes in big-endian order, so that the
 * byte-reversed f2fs bitmap can be scanned with ordinary bit ops. */
static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long word = 0;
	int pos = 24;
	int i = 0;

#if BITS_PER_LONG == 64
	pos = 56;
#endif
	for (; pos >= 0; pos -= BITS_PER_BYTE)
		word |= (unsigned long)str[i++] << pos;

	return word;
}
/*
 * __reverse_ffs is the __ffs analogue for bitmaps stored with MSB and
 * LSB swapped within each byte by f2fs_set_bit: it returns the position,
 * counted from the most significant bit of the word, of the first set bit.
 * (Copied in spirit from include/asm-generic/bitops/__ffs.h.)
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if (word & 0xffffffff00000000UL)
		word >>= 32;
	else
		num += 32;
#endif
	if (word & 0xffff0000)
		word >>= 16;
	else
		num += 16;

	if (word & 0xff00)
		word >>= 8;
	else
		num += 8;

	if (word & 0xf0)
		word >>= 4;
	else
		num += 4;

	if (word & 0xc)
		word >>= 2;
	else
		num += 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be integral times of unsigned long.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 * Returns the index of the first set bit at or after @offset, or @size
 * when none is found.
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	/* from here on, size counts bits remaining from the current word */
	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		/* fast path: whole word clear */
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		/* mask off bits before @offset in the first word */
		tmp &= ~0UL >> offset;
		/* mask off bits past the bitmap end in the last word */
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}
/*
 * Counterpart of __find_rev_next_bit: returns the index of the first
 * CLEAR bit at or after @offset in the byte-reversed bitmap, or @size
 * when every remaining bit is set.
 */
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	/* from here on, size counts bits remaining from the current word */
	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		/* fast path: whole word set */
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		/* force bits before @offset to "set" so they are skipped */
		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		/* force bits past the bitmap end to "set" as well */
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}
/*
 * Track @page as part of an in-memory atomic write: tag the page private
 * data, take a reference, and append it to the inode's inmem list.
 */
void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);
	SetPagePrivate(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}
/*
 * Walk @head and release every tracked in-memory page.
 * @drop:    pages are being discarded (atomic write aborted).
 * @recover: roll each block back to its pre-commit address
 *           (cur->old_addr) after a failed commit.
 * Returns 0, or -EAGAIN if a rollback lookup failed (the caller must
 * then recover via journal/rewrite).
 */
static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		lock_page(page);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			if (get_dnode_of_data(&dn, page->index, LOOKUP_NODE)) {
				err = -EAGAIN;
				goto next;
			}
			get_node_info(sbi, dn.nid, &ni);
			/* restore the on-disk mapping to the old address */
			f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the sccessful status */
		if (drop || recover)
			ClearPageUptodate(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}
/*
 * Abort an atomic write: discard every tracked page and clear the
 * inode's atomic-file state.
 */
void drop_inmem_pages(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);

	mutex_lock(&fi->inmem_lock);
	__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_FILE);
	stat_dec_atomic_write(inode);
}
/*
 * Drop a single tracked atomic-write page (e.g. on invalidation).
 * The page must be on the inode's inmem list; this is asserted.
 */
void drop_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct list_head *head = &fi->inmem_pages;
	struct inmem_pages *cur = NULL;

	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry(cur, head, list) {
		if (cur->page == page)
			break;
	}

	/* missing entry would mean the tracking list is corrupted */
	f2fs_bug_on(sbi, !cur || cur->page != page);
	list_del(&cur->list);
	mutex_unlock(&fi->inmem_lock);

	dec_page_count(sbi, F2FS_INMEM_PAGES);
	kmem_cache_free(inmem_entry_slab, cur);

	ClearPageUptodate(page);
	set_page_private(page, 0);
	ClearPagePrivate(page);
	f2fs_put_page(page, 0);

	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}
/*
 * Write out every tracked atomic page, recording each page's previous
 * block address so a failed commit can be rolled back.  Successfully
 * written entries are moved to @revoke_list; on full success the list
 * is released without rollback.
 */
static int __commit_inmem_pages(struct inode *inode,
					struct list_head *revoke_list)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
	};
	pgoff_t last_idx = ULONG_MAX;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		/* page may have been truncated away meanwhile */
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			set_page_dirty(page);
			f2fs_wait_on_page_writeback(page, DATA, true);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				remove_dirty_inode(inode);
			}

			fio.page = page;
			fio.old_blkaddr = NULL_ADDR;
			fio.encrypted_page = NULL;
			fio.need_lock = LOCK_DONE;
			err = do_write_data_page(&fio);
			if (err) {
				unlock_page(page);
				break;
			}

			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;
			last_idx = page->index;
		}
		unlock_page(page);
		list_move_tail(&cur->list, revoke_list);
	}

	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(sbi, inode, 0, last_idx, DATA);

	if (!err)
		__revoke_inmem_pages(inode, revoke_list, false, false);

	return err;
}
/*
 * Commit an atomic write: flush all tracked pages to disk under
 * f2fs_lock_op(); on failure, attempt a full rollback of what was
 * already written and drop the rest.
 */
int commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct list_head revoke_list;
	int err;

	INIT_LIST_HEAD(&revoke_list);
	f2fs_balance_fs(sbi, true);
	f2fs_lock_op(sbi);

	set_inode_flag(inode, FI_ATOMIC_COMMIT);

	mutex_lock(&fi->inmem_lock);
	err = __commit_inmem_pages(inode, &revoke_list);
	if (err) {
		int ret;
		/*
		 * try to revoke all committed pages, but still we could fail
		 * due to no memory or other reason, if that happened, EAGAIN
		 * will be returned, which means in such case, transaction is
		 * already not integrity, caller should use journal to do the
		 * recovery or rewrite & commit last transaction. For other
		 * error number, revoking was done by filesystem itself.
		 */
		ret = __revoke_inmem_pages(inode, &revoke_list, false, true);
		if (ret)
			err = ret;

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
	}
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_COMMIT);

	f2fs_unlock_op(sbi);
	return err;
}
/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
		f2fs_show_injection_info(FAULT_CHECKPOINT);
		f2fs_stop_checkpoint(sbi, false);
	}
#endif

	/* balance_fs_bg is able to be pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi);

	/*
	 * We should do GC or end up with checkpoint, if there are so many dirty
	 * dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		mutex_lock(&sbi->gc_mutex);
		/* NOTE(review): gc_mutex is taken here but not released in
		 * this function -- presumably f2fs_gc() drops it; verify. */
		f2fs_gc(sbi, false, false, NULL_SEGNO);
	}
}
/*
 * Background balancing: shrink caches when memory is tight and trigger
 * a checkpoint when too much metadata has accumulated.
 */
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink extent cache when there is no enough memory */
	if (!available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!available_free_memory(sbi, NAT_ENTRIES))
		try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!available_free_memory(sbi, FREE_NIDS))
		try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		build_free_nids(sbi, false, false);

	/* only checkpoint when idle, unless dirty NATs pile up */
	if (!is_idle(sbi) && !excess_dirty_nats(sbi))
		return;

	/* checkpoint is the only way to shrink partial cached entries */
	if (!available_free_memory(sbi, NAT_ENTRIES) ||
			!available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			f2fs_time_over(sbi, CP_TIME)) {
		if (test_opt(sbi, DATA_FLUSH)) {
			struct blk_plug plug;

			blk_start_plug(&plug);
			sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}
/*
 * Issue a synchronous cache-flush (preflush) bio to @bdev and wait for
 * completion.  Returns the bio's status.
 */
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	struct bio *bio = f2fs_bio_alloc(0);
	int ret;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	bio->bi_bdev = bdev;
	ret = submit_bio_wait(bio);
	bio_put(bio);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	return ret;
}
/* Flush the primary device, then every extra device of a multi-device
 * filesystem, stopping at the first failure. */
static int submit_flush_wait(struct f2fs_sb_info *sbi)
{
	int i, ret;

	ret = __submit_flush_wait(sbi, sbi->sb->s_bdev);
	if (ret || !sbi->s_ndevs)
		return ret;

	for (i = 1; i < sbi->s_ndevs; i++) {
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}
/*
 * Kernel thread that merges concurrent flush requests: it grabs the
 * whole pending llist, issues a single device flush, and completes every
 * waiter with the shared result.
 */
static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		/* llist is LIFO; restore submission order */
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		ret = submit_flush_wait(sbi);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}
/*
 * Issue a device cache flush on behalf of fsync & co.  Depending on
 * mount options this is a no-op (NOBARRIER), a direct flush, or a
 * request queued to the merging flush thread.
 */
int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		ret = submit_flush_wait(sbi);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	/* nobody else flushing right now: do it directly */
	if (!atomic_read(&fcc->issing_flush)) {
		atomic_inc(&fcc->issing_flush);
		ret = submit_flush_wait(sbi);
		atomic_dec(&fcc->issing_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	init_completion(&cmd.wait);

	atomic_inc(&fcc->issing_flush);
	llist_add(&cmd.llnode, &fcc->issue_list);

	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->issing_flush);
	} else {
		/* flush thread gone: abandon the queued requests */
		llist_del_all(&fcc->issue_list);
		atomic_set(&fcc->issing_flush, 0);
	}

	return cmd.ret;
}
/*
 * Allocate the flush-merge control structure (if not already present)
 * and start the flush-merging kernel thread when FLUSH_MERGE is on.
 */
int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	/* remount case: control exists, only the thread needs restarting */
	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		goto init_thread;
	}

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->issing_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;

	if (!test_opt(sbi, FLUSH_MERGE))
		return err;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
		return err;
	}

	return err;
}
/*
 * Stop the flush-merging thread; free the control structure too when
 * @free is set (clearing the pointer before kthread_stop avoids new
 * submissions racing with teardown).
 */
void destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}
/*
 * Mark @segno dirty in the @dirty_type bitmap (and in its per-type DIRTY
 * sub-bitmap), updating the counters.  Current working segments are
 * never added.
 */
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		/* sanity check: a corrupted seg_entry type would otherwise
		 * index past the dirty_segmap/nr_dirty arrays */
		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}
/*
 * Clear @segno from the @dirty_type bitmap (and its per-type DIRTY
 * sub-bitmap).  A segment with no valid blocks left also stops being a
 * GC victim candidate.
 */
static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, true) == 0)
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
	}
}
/*
 * Should not occur error such as -ENOMEM.
 * Adding dirty entry into seglist is not critical operation.
 * If a given segment is one of current working segments, it won't be added.
 *
 * Classify @segno by its valid-block count: fully empty -> PRE (ready to
 * free), partially valid -> DIRTY, fully valid -> removed from DIRTY.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}
/*
 * Allocate and initialize a discard command for [@start, @start+@len)
 * on @bdev (logical start @lstart) and queue it on the pending list
 * bucketed by length.
 */
static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->lstart = lstart;
	dc->start = start;
	dc->len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}
/* Create a discard command and link it into the dcc rb-tree at the
 * caller-supplied insertion point. */
static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node *parent, struct rb_node **p)
{
	struct discard_cmd *dc = __create_discard_cmd(sbi, bdev, lstart,
						      start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color(&dc->rb_node, &SM_I(sbi)->dcc_info->root);
	return dc;
}
/*
 * Unlink a discard command from both the list and the rb-tree, update
 * the accounting and free it.
 */
static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_dec(&dcc->issing_discard);

	list_del(&dc->list);
	rb_erase(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}
/*
 * Retire a discard command, logging any real error (-EOPNOTSUPP just
 * means the device has no discard support and is silently ignored).
 */
static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		f2fs_msg(sbi->sb, KERN_INFO,
			"Issue discard(%u, %u, %u) failed, ret: %d",
			dc->lstart, dc->start, dc->len, dc->error);
	__detach_discard_cmd(dcc, dc);
}
/*
 * bio completion callback for a submitted discard: record the block-layer
 * status, mark the command done, and wake every waiter blocked on
 * dc->wait (both the flusher thread and f2fs_wait_discard_bio callers).
 */
static void f2fs_submit_discard_endio(struct bio *bio)
{
struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
dc->error = bio->bi_error;
dc->state = D_DONE;
complete_all(&dc->wait);
bio_put(bio);
}
/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
/*
 * Issue one prepared (D_PREP) discard command to the block layer.
 * On success the command transitions to D_SUBMIT and, if a bio was built,
 * moves onto the wait list; on failure it is removed entirely.
 * NOTE(review): if __blkdev_issue_discard() succeeds without building a
 * bio, the command stays on its pend list in D_SUBMIT state with no endio
 * to complete it — confirm that path is intended/unreachable here.
 */
static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_cmd *dc)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct bio *bio = NULL;
if (dc->state != D_PREP)
return;
trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
dc->error = __blkdev_issue_discard(dc->bdev,
SECTOR_FROM_BLOCK(dc->start),
SECTOR_FROM_BLOCK(dc->len),
GFP_NOFS, 0, &bio);
if (!dc->error) {
/* should keep before submission to avoid D_DONE right away */
dc->state = D_SUBMIT;
atomic_inc(&dcc->issued_discard);
atomic_inc(&dcc->issing_discard);
if (bio) {
bio->bi_private = dc;
bio->bi_end_io = f2fs_submit_discard_endio;
bio->bi_opf |= REQ_SYNC;
submit_bio(bio);
list_move_tail(&dc->list, &dcc->wait_list);
}
} else {
__remove_discard_cmd(sbi, dc);
}
}
/*
 * Insert a new discard command into the rbtree dcc->root keyed by lstart.
 * If the caller already computed an insertion point (@insert_p together
 * with @insert_parent), use it directly; otherwise look one up.
 * Returns the newly attached command.
 */
static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p = &dcc->root.rb_node;
	struct rb_node *parent = NULL;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = __lookup_rb_tree_for_insert(sbi, &dcc->root, &parent, lstart);
do_insert:
	/*
	 * The previous "if (!dc) return NULL; return dc;" tail was a
	 * tautology (both branches return dc's value), so just return
	 * the attach result directly.
	 */
	return __attach_discard_cmd(sbi, bdev, lstart, start, len, parent, p);
}
/*
 * After a command's length changed, move it to the pending-list bucket
 * that matches its new size (plist_idx is length-based).
 */
static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
struct discard_cmd *dc)
{
list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
}
/*
 * Punch a single block @blkaddr out of a pending discard command because
 * that block is about to be rewritten. A completed or single-block
 * command is simply removed; otherwise the range is trimmed and, if the
 * hole is in the middle, split into two commands.
 */
static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_cmd *dc, block_t blkaddr)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_info di = dc->di;
bool modified = false;
if (dc->state == D_DONE || dc->len == 1) {
__remove_discard_cmd(sbi, dc);
return;
}
/* drop the old length; re-accounted below per surviving piece */
dcc->undiscard_blks -= di.len;
if (blkaddr > di.lstart) {
/* keep the front part [lstart, blkaddr) in the existing command */
dc->len = blkaddr - dc->lstart;
dcc->undiscard_blks += dc->len;
__relocate_discard_cmd(dcc, dc);
modified = true;
}
if (blkaddr < di.lstart + di.len - 1) {
if (modified) {
/* front already kept: insert a fresh command for the tail */
__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
di.start + blkaddr + 1 - di.lstart,
di.lstart + di.len - 1 - blkaddr,
NULL, NULL);
} else {
/* !modified means blkaddr == di.lstart: shave one block off the front */
dc->lstart++;
dc->len--;
dc->start++;
dcc->undiscard_blks += dc->len;
__relocate_discard_cmd(dcc, dc);
}
}
}
/*
 * Add the discard range [lstart, lstart + len) (device offset @start) to
 * the rbtree, merging with adjacent prepared commands on the same bdev
 * where possible. The loop walks existing commands overlapping the range,
 * filling each gap either by extending a neighbour (back/front merge) or
 * by inserting a new command.
 */
static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t lstart,
block_t start, block_t len)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
struct discard_cmd *dc;
struct discard_info di = {0};
struct rb_node **insert_p = NULL, *insert_parent = NULL;
block_t end = lstart + len;
mutex_lock(&dcc->cmd_lock);
dc = (struct discard_cmd *)__lookup_rb_tree_ret(&dcc->root,
NULL, lstart,
(struct rb_entry **)&prev_dc,
(struct rb_entry **)&next_dc,
&insert_p, &insert_parent, true);
/* an exact hit acts as the "previous" command for the gap walk */
if (dc)
prev_dc = dc;
if (!prev_dc) {
/* nothing before lstart: first gap runs up to next_dc (or whole range) */
di.lstart = lstart;
di.len = next_dc ? next_dc->lstart - lstart : len;
di.len = min(di.len, len);
di.start = start;
}
while (1) {
struct rb_node *node;
bool merged = false;
struct discard_cmd *tdc = NULL;
if (prev_dc) {
/* gap starts right after prev_dc, clamped into [lstart, end) */
di.lstart = prev_dc->lstart + prev_dc->len;
if (di.lstart < lstart)
di.lstart = lstart;
if (di.lstart >= end)
break;
if (!next_dc || next_dc->lstart > end)
di.len = end - di.lstart;
else
di.len = next_dc->lstart - di.lstart;
di.start = start + di.lstart - lstart;
}
if (!di.len)
goto next;
/* try extending prev_dc forward over the gap */
if (prev_dc && prev_dc->state == D_PREP &&
prev_dc->bdev == bdev &&
__is_discard_back_mergeable(&di, &prev_dc->di)) {
prev_dc->di.len += di.len;
dcc->undiscard_blks += di.len;
__relocate_discard_cmd(dcc, prev_dc);
di = prev_dc->di;
tdc = prev_dc;
merged = true;
}
/* try extending next_dc backward; if both merged, drop the duplicate */
if (next_dc && next_dc->state == D_PREP &&
next_dc->bdev == bdev &&
__is_discard_front_mergeable(&di, &next_dc->di)) {
next_dc->di.lstart = di.lstart;
next_dc->di.len += di.len;
next_dc->di.start = di.start;
dcc->undiscard_blks += di.len;
__relocate_discard_cmd(dcc, next_dc);
if (tdc)
__remove_discard_cmd(sbi, tdc);
merged = true;
}
if (!merged) {
__insert_discard_tree(sbi, bdev, di.lstart, di.start,
di.len, NULL, NULL);
}
next:
/* advance the sliding (prev, next) window through the tree */
prev_dc = next_dc;
if (!prev_dc)
break;
node = rb_next(&prev_dc->rb_node);
next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
}
mutex_unlock(&dcc->cmd_lock);
}
/*
 * Queue a discard for later asynchronous issue. @blkstart is a global
 * (logical) block; for multi-device setups it is rebased to the target
 * device's local offset before being recorded in the tree.
 */
static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t blkstart, block_t blklen)
{
block_t lblkstart = blkstart;
trace_f2fs_queue_discard(bdev, blkstart, blklen);
if (sbi->s_ndevs) {
int devi = f2fs_target_device_index(sbi, blkstart);
blkstart -= FDEV(devi).start_blk;
}
__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
return 0;
}
/*
 * Submit pending discard commands, largest bucket first. With
 * @issue_cond set (background thread), only issue while the filesystem
 * is idle and stop after DISCARD_ISSUE_RATE commands per pass.
 */
static void __issue_discard_cmd(struct f2fs_sb_info *sbi, bool issue_cond)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct list_head *pend_list;
struct discard_cmd *dc, *tmp;
struct blk_plug plug;
int i, iter = 0;
mutex_lock(&dcc->cmd_lock);
f2fs_bug_on(sbi,
!__check_rb_tree_consistence(sbi, &dcc->root));
/* plug so consecutive bios can be batched by the block layer */
blk_start_plug(&plug);
for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
pend_list = &dcc->pend_list[i];
list_for_each_entry_safe(dc, tmp, pend_list, list) {
f2fs_bug_on(sbi, dc->state != D_PREP);
if (!issue_cond || is_idle(sbi))
__submit_discard_cmd(sbi, dc);
if (issue_cond && iter++ > DISCARD_ISSUE_RATE)
goto out;
}
}
out:
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
}
/*
 * Wait for in-flight discards on the wait list and reap them.
 * With @wait_cond set, only fully completed (D_DONE, unreferenced)
 * commands are reaped directly; the first busy one is pinned with a
 * reference so it can be waited on outside cmd_lock, then the scan
 * restarts. Without @wait_cond, wait for and reap everything.
 */
static void __wait_discard_cmd(struct f2fs_sb_info *sbi, bool wait_cond)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct list_head *wait_list = &(dcc->wait_list);
struct discard_cmd *dc, *tmp;
bool need_wait;
next:
need_wait = false;
mutex_lock(&dcc->cmd_lock);
list_for_each_entry_safe(dc, tmp, wait_list, list) {
if (!wait_cond || (dc->state == D_DONE && !dc->ref)) {
wait_for_completion_io(&dc->wait);
__remove_discard_cmd(sbi, dc);
} else {
/* pin dc so it survives until we wait without the lock */
dc->ref++;
need_wait = true;
break;
}
}
mutex_unlock(&dcc->cmd_lock);
if (need_wait) {
wait_for_completion_io(&dc->wait);
mutex_lock(&dcc->cmd_lock);
f2fs_bug_on(sbi, dc->state != D_DONE);
dc->ref--;
if (!dc->ref)
__remove_discard_cmd(sbi, dc);
mutex_unlock(&dcc->cmd_lock);
goto next;
}
}
/* This should be covered by global mutex, &sit_i->sentry_lock */
/*
 * Ensure @blkaddr is safe to rewrite: a still-prepared discard covering
 * it is punched; a submitted one is pinned (ref++) and waited on outside
 * cmd_lock before being reaped.
 */
void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct discard_cmd *dc;
bool need_wait = false;
mutex_lock(&dcc->cmd_lock);
dc = (struct discard_cmd *)__lookup_rb_tree(&dcc->root, NULL, blkaddr);
if (dc) {
if (dc->state == D_PREP) {
__punch_discard_cmd(sbi, dc, blkaddr);
} else {
/* already in flight: hold a reference and wait below */
dc->ref++;
need_wait = true;
}
}
mutex_unlock(&dcc->cmd_lock);
if (need_wait) {
wait_for_completion_io(&dc->wait);
mutex_lock(&dcc->cmd_lock);
f2fs_bug_on(sbi, dc->state != D_DONE);
dc->ref--;
if (!dc->ref)
__remove_discard_cmd(sbi, dc);
mutex_unlock(&dcc->cmd_lock);
}
}
/* This comes from f2fs_put_super */
/* Unconditionally issue and drain all pending discards (unmount path). */
void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi)
{
__issue_discard_cmd(sbi, false);
__wait_discard_cmd(sbi, false);
}
/*
 * Background kthread: sleeps until discard commands exist (or the thread
 * is frozen/stopped), then conditionally issues and reaps them, throttled
 * by a short congestion wait between passes.
 */
static int issue_discard_thread(void *data)
{
struct f2fs_sb_info *sbi = data;
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
wait_queue_head_t *q = &dcc->discard_wait_queue;
set_freezable();
do {
wait_event_interruptible(*q, kthread_should_stop() ||
freezing(current) ||
atomic_read(&dcc->discard_cmd_cnt));
if (try_to_freeze())
continue;
if (kthread_should_stop())
return 0;
__issue_discard_cmd(sbi, true);
__wait_discard_cmd(sbi, true);
congestion_wait(BLK_RW_SYNC, HZ/50);
} while (!kthread_should_stop());
return 0;
}
#ifdef CONFIG_BLK_DEV_ZONED
/*
 * Discard on a zoned block device: conventional zones fall back to a
 * regular queued discard; sequential zones are reset instead, which
 * requires the range to exactly cover one zone.
 */
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t blkstart, block_t blklen)
{
sector_t sector, nr_sects;
block_t lblkstart = blkstart;
int devi = 0;
if (sbi->s_ndevs) {
devi = f2fs_target_device_index(sbi, blkstart);
blkstart -= FDEV(devi).start_blk;
}
/*
 * We need to know the type of the zone: for conventional zones,
 * use regular discard if the drive supports it. For sequential
 * zones, reset the zone write pointer.
 */
switch (get_blkz_type(sbi, bdev, blkstart)) {
case BLK_ZONE_TYPE_CONVENTIONAL:
if (!blk_queue_discard(bdev_get_queue(bdev)))
return 0;
return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
case BLK_ZONE_TYPE_SEQWRITE_REQ:
case BLK_ZONE_TYPE_SEQWRITE_PREF:
sector = SECTOR_FROM_BLOCK(blkstart);
nr_sects = SECTOR_FROM_BLOCK(blklen);
/* zone reset must be aligned to and sized as exactly one zone */
if (sector & (bdev_zone_sectors(bdev) - 1) ||
nr_sects != bdev_zone_sectors(bdev)) {
f2fs_msg(sbi->sb, KERN_INFO,
"(%d) %s: Unaligned discard attempted (block %x + %x)",
devi, sbi->s_ndevs ? FDEV(devi).path: "",
blkstart, blklen);
return -EIO;
}
trace_f2fs_issue_reset_zone(bdev, blkstart);
return blkdev_reset_zones(bdev, sector,
nr_sects, GFP_NOFS);
default:
/* Unknown zone type: broken device ? */
return -EIO;
}
}
#endif
/*
 * Dispatch a discard either through the zoned-device path (when the
 * filesystem was mounted blkzoned and the target bdev is zoned) or the
 * normal queued-discard path.
 */
static int __issue_discard_async(struct f2fs_sb_info *sbi,
struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
bdev_zoned_model(bdev) != BLK_ZONED_NONE)
return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}
/*
 * Issue a discard for [blkstart, blkstart + blklen). In multi-device
 * layouts the range is split at device boundaries and each sub-range is
 * sent to its own bdev. Each covered block is also marked in the
 * per-segment discard_map so it is not discarded again.
 */
static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
block_t blkstart, block_t blklen)
{
sector_t start = blkstart, len = 0;
struct block_device *bdev;
struct seg_entry *se;
unsigned int offset;
block_t i;
int err = 0;
bdev = f2fs_target_device(sbi, blkstart, NULL);
for (i = blkstart; i < blkstart + blklen; i++, len++) {
if (i != start) {
struct block_device *bdev2 =
f2fs_target_device(sbi, i, NULL);
if (bdev2 != bdev) {
/* crossed a device boundary: flush the run so far */
err = __issue_discard_async(sbi, bdev,
start, len);
if (err)
return err;
bdev = bdev2;
start = i;
len = 0;
}
}
se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
offset = GET_BLKOFF_FROM_SEG0(sbi, i);
if (!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
}
/* issue the final (or only) run */
if (len)
err = __issue_discard_async(sbi, bdev, start, len);
return err;
}
/*
 * Scan the segment at cpc->trim_start for block runs that can be
 * discarded and record them as a discard_entry on dcc's entry_list.
 * In CP_DISCARD (force) mode candidates are blocks neither checkpointed
 * nor already discarded; otherwise they are blocks freed since the last
 * checkpoint. With @check_only, just report whether any candidate exists.
 */
static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
bool check_only)
{
int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
int max_blocks = sbi->blocks_per_seg;
struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
unsigned long *discard_map = (unsigned long *)se->discard_map;
unsigned long *dmap = SIT_I(sbi)->tmp_map;
unsigned int start = 0, end = -1;
bool force = (cpc->reason & CP_DISCARD);
struct discard_entry *de = NULL;
struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
int i;
if (se->valid_blocks == max_blocks || !f2fs_discard_en(sbi))
return false;
if (!force) {
if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
SM_I(sbi)->dcc_info->nr_discards >=
SM_I(sbi)->dcc_info->max_discards)
return false;
}
/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
for (i = 0; i < entries; i++)
dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
while (force || SM_I(sbi)->dcc_info->nr_discards <=
SM_I(sbi)->dcc_info->max_discards) {
start = __find_rev_next_bit(dmap, max_blocks, end + 1);
if (start >= max_blocks)
break;
end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
/* in force mode, skip interior runs shorter than the trim minimum */
if (force && start && end != max_blocks
&& (end - start) < cpc->trim_minlen)
continue;
if (check_only)
return true;
/* one discard_entry per segment, allocated lazily on first hit */
if (!de) {
de = f2fs_kmem_cache_alloc(discard_entry_slab,
GFP_F2FS_ZERO);
de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
list_add_tail(&de->list, head);
}
for (i = start; i < end; i++)
__set_bit_le(i, (void *)de->discard_map);
SM_I(sbi)->dcc_info->nr_discards += end - start;
}
return false;
}
void release_discard_addrs(struct f2fs_sb_info *sbi)
{
struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
struct discard_entry *entry, *this;
/* drop caches */
list_for_each_entry_safe(entry, this, head, list) {
list_del(&entry->list);
kmem_cache_free(discard_entry_slab, entry);
}
}
/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
/* Flip every PRE (prefree) segment to free in the free segmap. */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned int segno;
mutex_lock(&dirty_i->seglist_lock);
for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
__set_test_and_free(sbi, segno);
mutex_unlock(&dirty_i->seglist_lock);
}
/*
 * After a checkpoint, clear all prefree segments and issue discards for
 * them: whole segment runs first (section-granular in LFS mode with
 * multi-segment sections), then the small per-block discards collected
 * by add_discard_addrs(). Finally wake the discard thread.
 */
void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
struct discard_entry *entry, *this;
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
unsigned int start = 0, end = -1;
unsigned int secno, start_segno;
bool force = (cpc->reason & CP_DISCARD);
mutex_lock(&dirty_i->seglist_lock);
while (1) {
int i;
/* find the next contiguous run [start, end) of prefree segments */
start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
if (start >= MAIN_SEGS(sbi))
break;
end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
start + 1);
for (i = start; i < end; i++)
clear_bit(i, prefree_map);
dirty_i->nr_dirty[PRE] -= end - start;
if (!test_opt(sbi, DISCARD))
continue;
/* fstrim (force) handles its own window; skip it here */
if (force && start >= cpc->trim_start &&
(end - 1) <= cpc->trim_end)
continue;
if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
(end - start) << sbi->log_blocks_per_seg);
continue;
}
next:
/* LFS with multi-segment sections: discard whole free sections only */
secno = GET_SEC_FROM_SEG(sbi, start);
start_segno = GET_SEG_FROM_SEC(sbi, secno);
if (!IS_CURSEC(sbi, secno) &&
!get_valid_blocks(sbi, start, true))
f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
sbi->segs_per_sec << sbi->log_blocks_per_seg);
start = start_segno + sbi->segs_per_sec;
if (start < end)
goto next;
else
end = start - 1;
}
mutex_unlock(&dirty_i->seglist_lock);
/* send small discards */
list_for_each_entry_safe(entry, this, head, list) {
unsigned int cur_pos = 0, next_pos, len, total_len = 0;
bool is_valid = test_bit_le(0, entry->discard_map);
find_next:
/* alternate between runs of set (discardable) and clear bits */
if (is_valid) {
next_pos = find_next_zero_bit_le(entry->discard_map,
sbi->blocks_per_seg, cur_pos);
len = next_pos - cur_pos;
if (f2fs_sb_mounted_blkzoned(sbi->sb) ||
(force && len < cpc->trim_minlen))
goto skip;
f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
len);
cpc->trimmed += len;
total_len += len;
} else {
next_pos = find_next_bit_le(entry->discard_map,
sbi->blocks_per_seg, cur_pos);
}
skip:
cur_pos = next_pos;
is_valid = !is_valid;
if (cur_pos < sbi->blocks_per_seg)
goto find_next;
list_del(&entry->list);
SM_I(sbi)->dcc_info->nr_discards -= total_len;
kmem_cache_free(discard_entry_slab, entry);
}
wake_up(&SM_I(sbi)->dcc_info->discard_wait_queue);
}
/*
 * Allocate and initialize the per-sb discard command control and start
 * the background discard kthread. If the control already exists
 * (e.g. remount), only the thread is (re)started. Returns 0 or -errno.
 */
static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
dev_t dev = sbi->sb->s_bdev->bd_dev;
struct discard_cmd_control *dcc;
int err = 0, i;
if (SM_I(sbi)->dcc_info) {
dcc = SM_I(sbi)->dcc_info;
goto init_thread;
}
dcc = kzalloc(sizeof(struct discard_cmd_control), GFP_KERNEL);
if (!dcc)
return -ENOMEM;
INIT_LIST_HEAD(&dcc->entry_list);
for (i = 0; i < MAX_PLIST_NUM; i++)
INIT_LIST_HEAD(&dcc->pend_list[i]);
INIT_LIST_HEAD(&dcc->wait_list);
mutex_init(&dcc->cmd_lock);
atomic_set(&dcc->issued_discard, 0);
atomic_set(&dcc->issing_discard, 0);
atomic_set(&dcc->discard_cmd_cnt, 0);
dcc->nr_discards = 0;
/* cap: one pending discard block per main-area block */
dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
dcc->undiscard_blks = 0;
dcc->root = RB_ROOT;
init_waitqueue_head(&dcc->discard_wait_queue);
SM_I(sbi)->dcc_info = dcc;
init_thread:
dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
if (IS_ERR(dcc->f2fs_issue_discard)) {
err = PTR_ERR(dcc->f2fs_issue_discard);
kfree(dcc);
SM_I(sbi)->dcc_info = NULL;
return err;
}
return err;
}
/*
 * Stop the discard kthread (clearing the pointer first so the thread's
 * users see it gone) and free the discard command control.
 */
static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
if (!dcc)
return;
if (dcc->f2fs_issue_discard) {
struct task_struct *discard_thread = dcc->f2fs_issue_discard;
dcc->f2fs_issue_discard = NULL;
kthread_stop(discard_thread);
}
kfree(dcc);
SM_I(sbi)->dcc_info = NULL;
}
/*
 * Mark @segno's SIT entry dirty. Returns false when this call newly
 * dirtied the entry (and bumps the dirty-sentry counter), true when it
 * was already dirty.
 */
static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	bool was_dirty = __test_and_set_bit(segno, sit_i->dirty_sentries_bitmap);

	if (!was_dirty)
		sit_i->dirty_sentries++;
	return was_dirty;
}
/*
 * Set the curseg type recorded in @segno's SIT entry; when @modified is
 * nonzero, also mark the entry dirty so it gets flushed at checkpoint.
 */
static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);

	se->type = type;
	if (!modified)
		return;
	__mark_sit_entry_dirty(sbi, segno);
}
/*
 * Apply a +1/-1 valid-block delta for @blkaddr to its segment's SIT
 * entry: valid-block count, mtime, current/checkpoint validity bitmaps
 * and the discard map, plus section-level counters. Bitmap mismatches
 * (setting an already-set bit, clearing an already-clear one) indicate
 * corruption and trigger f2fs_bug_on.
 */
static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
struct seg_entry *se;
unsigned int segno, offset;
long int new_vblocks;
segno = GET_SEGNO(sbi, blkaddr);
se = get_seg_entry(sbi, segno);
new_vblocks = se->valid_blocks + del;
offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
/* sanity: count must stay within [0, blocks_per_seg] */
f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
(new_vblocks > sbi->blocks_per_seg)));
se->valid_blocks = new_vblocks;
se->mtime = get_mtime(sbi);
SIT_I(sbi)->max_mtime = se->mtime;
/* Update valid block bitmap */
if (del > 0) {
if (f2fs_test_and_set_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
/* mirror bitmap cross-check under CHECK_FS builds */
if (f2fs_test_and_set_bit(offset,
se->cur_valid_map_mir))
f2fs_bug_on(sbi, 1);
else
WARN_ON(1);
#else
f2fs_bug_on(sbi, 1);
#endif
}
if (f2fs_discard_en(sbi) &&
!f2fs_test_and_set_bit(offset, se->discard_map))
sbi->discard_blks--;
/* don't overwrite by SSR to keep node chain */
if (se->type == CURSEG_WARM_NODE) {
if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
se->ckpt_valid_blocks++;
}
} else {
if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) {
#ifdef CONFIG_F2FS_CHECK_FS
if (!f2fs_test_and_clear_bit(offset,
se->cur_valid_map_mir))
f2fs_bug_on(sbi, 1);
else
WARN_ON(1);
#else
f2fs_bug_on(sbi, 1);
#endif
}
if (f2fs_discard_en(sbi) &&
f2fs_test_and_clear_bit(offset, se->discard_map))
sbi->discard_blks++;
}
if (!f2fs_test_bit(offset, se->ckpt_valid_map))
se->ckpt_valid_blocks += del;
__mark_sit_entry_dirty(sbi, segno);
/* update total number of valid blocks to be written in ckpt area */
SIT_I(sbi)->written_valid_blocks += del;
if (sbi->segs_per_sec > 1)
get_sec_entry(sbi, segno)->valid_blocks += del;
}
/*
 * Account a block move: validate @new, invalidate @old (when it maps to
 * a real segment), and refresh the dirty status of both segments.
 */
void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
update_sit_entry(sbi, new, 1);
if (GET_SEGNO(sbi, old) != NULL_SEGNO)
update_sit_entry(sbi, old, -1);
locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}
/*
 * Invalidate a single block: decrement its segment's valid count and
 * refresh the segment's dirty status, under sentry_lock. NEW_ADDR blocks
 * were never on disk and need no accounting; NULL_ADDR is a caller bug.
 */
void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
unsigned int segno = GET_SEGNO(sbi, addr);
struct sit_info *sit_i = SIT_I(sbi);
f2fs_bug_on(sbi, addr == NULL_ADDR);
if (addr == NEW_ADDR)
return;
/* add it into sit main buffer */
mutex_lock(&sit_i->sentry_lock);
update_sit_entry(sbi, addr, -1);
/* add it into dirty seglist */
locate_dirty_segment(sbi, segno);
mutex_unlock(&sit_i->sentry_lock);
}
/*
 * Report whether @blkaddr was valid as of the last checkpoint (i.e. its
 * bit is set in the segment's ckpt_valid_map). NEW_ADDR/NULL_ADDR are
 * treated as checkpointed.
 */
bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *se;
	unsigned int offset;
	bool is_cp;

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
		return true;

	mutex_lock(&sit_i->sentry_lock);
	se = get_seg_entry(sbi, GET_SEGNO(sbi, blkaddr));
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	is_cp = f2fs_test_bit(offset, se->ckpt_valid_map) ? true : false;
	mutex_unlock(&sit_i->sentry_lock);

	return is_cp;
}
/*
 * This function should be resided under the curseg_mutex lock
 */
/* Copy @sum into the curseg's summary block at the current block offset. */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
struct f2fs_summary *sum)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
void *addr = curseg->sum_blk;
addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
memcpy(addr, sum, sizeof(struct f2fs_summary));
}
/*
 * Calculate the number of current summary pages for writing
 */
/*
 * Returns 1, 2 or 3 pages depending on how many data-summary entries are
 * live across the three data cursegs. SSR segments may contain entries
 * anywhere, so they count as a full segment; @for_ra uses the checkpoint
 * block offsets instead of the in-memory ones (readahead path).
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
int valid_sum_count = 0;
int i, sum_in_page;
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
if (sbi->ckpt->alloc_type[i] == SSR)
valid_sum_count += sbi->blocks_per_seg;
else {
if (for_ra)
valid_sum_count += le16_to_cpu(
F2FS_CKPT(sbi)->cur_data_blkoff[i]);
else
valid_sum_count += curseg_blkoff(sbi, i);
}
}
/* first page also holds two journals and the footer */
sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
SUM_FOOTER_SIZE) / SUMMARY_SIZE;
if (valid_sum_count <= sum_in_page)
return 1;
else if ((valid_sum_count - sum_in_page) <=
(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
return 2;
return 3;
}
/*
 * Caller should put this summary page
 */
/* Read the summary block page for @segno from the meta area. */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}
/*
 * Overwrite the meta page at @blk_addr with one page of @src data, or
 * zero-fill it when @src is NULL, then mark it dirty and release it.
 */
void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *dst = page_address(page);

	if (!src)
		memset(dst, 0, PAGE_SIZE);
	else
		memcpy(dst, src, PAGE_SIZE);

	set_page_dirty(page);
	f2fs_put_page(page, 1);
}
/* Write a whole summary block to the meta page at @blk_addr. */
static void write_sum_page(struct f2fs_sb_info *sbi,
struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
update_meta_page(sbi, (void *)sum_blk, blk_addr);
}
/*
 * Flush the current in-memory summary block of curseg @type to the meta
 * page at @blk_addr: the journal is copied under journal_rwsem, the
 * entries and footer under curseg_mutex.
 */
static void write_current_sum_page(struct f2fs_sb_info *sbi,
int type, block_t blk_addr)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
struct page *page = grab_meta_page(sbi, blk_addr);
struct f2fs_summary_block *src = curseg->sum_blk;
struct f2fs_summary_block *dst;
dst = (struct f2fs_summary_block *)page_address(page);
mutex_lock(&curseg->curseg_mutex);
down_read(&curseg->journal_rwsem);
memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
up_read(&curseg->journal_rwsem);
memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
mutex_unlock(&curseg->curseg_mutex);
set_page_dirty(page);
f2fs_put_page(page, 1);
}
static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
unsigned int segno = curseg->segno + 1;
struct free_segmap_info *free_i = FREE_I(sbi);
if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
return !test_bit(segno, free_i->free_segmap);
return 0;
}
/*
 * Find a new segment from the free segments bitmap to right order
 * This function should be returned with success, otherwise BUG
 */
/*
 * Pick a free segment near *newseg, preferring (a) the same section when
 * @new_sec is false, then (b) a free section scanning right or left per
 * @dir, while trying to avoid zones already owned by other cursegs.
 * Writes the chosen segment back through @newseg.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
unsigned int *newseg, bool new_sec, int dir)
{
struct free_segmap_info *free_i = FREE_I(sbi);
unsigned int segno, secno, zoneno;
unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
unsigned int left_start = hint;
bool init = true;
int go_left = 0;
int i;
spin_lock(&free_i->segmap_lock);
/* first try: a free segment within the current section */
if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
segno = find_next_zero_bit(free_i->free_segmap,
GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
goto got_it;
}
find_other_zone:
secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
if (secno >= MAIN_SECS(sbi)) {
if (dir == ALLOC_RIGHT) {
/* wrap around: any free section must exist (else BUG) */
secno = find_next_zero_bit(free_i->free_secmap,
MAIN_SECS(sbi), 0);
f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
} else {
go_left = 1;
/* NOTE(review): hint == 0 would wrap left_start to UINT_MAX
before the test_bit below — presumed unreachable because a
free section must exist somewhere; confirm. */
left_start = hint - 1;
}
}
if (go_left == 0)
goto skip_left;
/* scan leftwards for a free section, wrapping to a full scan at 0 */
while (test_bit(left_start, free_i->free_secmap)) {
if (left_start > 0) {
left_start--;
continue;
}
left_start = find_next_zero_bit(free_i->free_secmap,
MAIN_SECS(sbi), 0);
f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
break;
}
secno = left_start;
skip_left:
hint = secno;
segno = GET_SEG_FROM_SEC(sbi, secno);
zoneno = GET_ZONE_FROM_SEC(sbi, secno);
/* give up on finding another zone */
if (!init)
goto got_it;
if (sbi->secs_per_zone == 1)
goto got_it;
if (zoneno == old_zoneno)
goto got_it;
if (dir == ALLOC_LEFT) {
if (!go_left && zoneno + 1 >= total_zones)
goto got_it;
if (go_left && zoneno == 0)
goto got_it;
}
/* avoid a zone some other curseg is already writing into */
for (i = 0; i < NR_CURSEG_TYPE; i++)
if (CURSEG_I(sbi, i)->zone == zoneno)
break;
if (i < NR_CURSEG_TYPE) {
/* zone is in user, try another */
if (go_left)
hint = zoneno * sbi->secs_per_zone - 1;
else if (zoneno + 1 >= total_zones)
hint = 0;
else
hint = (zoneno + 1) * sbi->secs_per_zone;
init = false;
goto find_other_zone;
}
got_it:
/* set it as dirty segment in free segmap */
f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
__set_inuse(sbi, segno);
*newseg = segno;
spin_unlock(&free_i->segmap_lock);
}
/*
 * Switch curseg @type over to its previously chosen next_segno and reset
 * its summary footer and write position for the new segment.
 */
static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
struct summary_footer *sum_footer;
curseg->segno = curseg->next_segno;
curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
curseg->next_blkoff = 0;
curseg->next_segno = NULL_SEGNO;
sum_footer = &(curseg->sum_blk->footer);
memset(sum_footer, 0, sizeof(struct summary_footer));
if (IS_DATASEG(type))
SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
if (IS_NODESEG(type))
SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
/* record the segment's type in its SIT entry */
__set_sit_entry_type(sbi, type, curseg->segno, modified);
}
/*
 * Choose the search hint for the next segment of curseg @type.
 * Multi-segment sections keep the original policy (current segno);
 * otherwise hot data and node logs restart from 0, and the remaining
 * types reuse the GC's last ALLOC_NEXT victim when one is recorded.
 */
static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
{
	unsigned int hint;

	/* if segs_per_sec is large than 1, we need to keep original policy. */
	if (sbi->segs_per_sec != 1)
		return CURSEG_I(sbi, type)->segno;

	if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
		return 0;

	hint = SIT_I(sbi)->last_victim[ALLOC_NEXT];
	return hint ? hint : CURSEG_I(sbi, type)->segno;
}
/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
unsigned int segno = curseg->segno;
int dir = ALLOC_LEFT;
/* persist the old segment's summaries before moving on */
write_sum_page(sbi, curseg->sum_blk,
GET_SUM_BLOCK(sbi, segno));
if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
dir = ALLOC_RIGHT;
if (test_opt(sbi, NOHEAP))
dir = ALLOC_RIGHT;
segno = __get_next_segno(sbi, type);
get_new_segment(sbi, &segno, new_sec, dir);
curseg->next_segno = segno;
reset_curseg(sbi, type, 1);
curseg->alloc_type = LFS;
}
/*
 * For an SSR segment, find the first block offset >= @start that is free
 * in both the current and checkpoint validity bitmaps, and store it as
 * the segment's next write position.
 */
static void __next_free_blkoff(struct f2fs_sb_info *sbi,
struct curseg_info *seg, block_t start)
{
struct seg_entry *se = get_seg_entry(sbi, seg->segno);
int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
unsigned long *target_map = SIT_I(sbi)->tmp_map;
unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
int i, pos;
/* a block is usable only if free in BOTH maps */
for (i = 0; i < entries; i++)
target_map[i] = ckpt_map[i] | cur_map[i];
pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
seg->next_blkoff = pos;
}
/*
 * Advance seg->next_blkoff to the next writable block. An LFS segment
 * simply appends; an SSR segment must search the in-use bitmaps for the
 * next free slot via __next_free_blkoff().
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type != SSR) {
		seg->next_blkoff++;
		return;
	}
	__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
}
/*
 * This function always allocates a used segment(from dirty seglist) by SSR
 * manner, so it should recover the existing segment information of valid blocks
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, type);
unsigned int new_segno = curseg->next_segno;
struct f2fs_summary_block *sum_node;
struct page *sum_page;
/* flush the outgoing segment's summaries */
write_sum_page(sbi, curseg->sum_blk,
GET_SUM_BLOCK(sbi, curseg->segno));
__set_test_and_inuse(sbi, new_segno);
/* the reused segment is no longer pre-free/dirty */
mutex_lock(&dirty_i->seglist_lock);
__remove_dirty_segment(sbi, new_segno, PRE);
__remove_dirty_segment(sbi, new_segno, DIRTY);
mutex_unlock(&dirty_i->seglist_lock);
reset_curseg(sbi, type, 1);
curseg->alloc_type = SSR;
__next_free_blkoff(sbi, curseg, 0);
if (reuse) {
/* load the on-disk summaries of the reused segment */
sum_page = get_sum_page(sbi, new_segno);
sum_node = (struct f2fs_summary_block *)page_address(sum_page);
memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
f2fs_put_page(sum_page, 1);
}
}
/*
 * Pick a victim segment for SSR allocation of curseg @type. First ask
 * the victim selector for @type itself; failing that, probe the sibling
 * temperature logs (cold-to-hot for warm/cold types, hot-to-cold
 * otherwise). Returns 1 with curseg->next_segno set, or 0.
 */
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
unsigned segno = NULL_SEGNO;
int i, cnt;
bool reversed = false;
/* need_SSR() already forces to do this */
if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
curseg->next_segno = segno;
return 1;
}
/* For node segments, let's do SSR more intensively */
if (IS_NODESEG(type)) {
if (type >= CURSEG_WARM_NODE) {
reversed = true;
i = CURSEG_COLD_NODE;
} else {
i = CURSEG_HOT_NODE;
}
cnt = NR_CURSEG_NODE_TYPE;
} else {
if (type >= CURSEG_WARM_DATA) {
reversed = true;
i = CURSEG_COLD_DATA;
} else {
i = CURSEG_HOT_DATA;
}
cnt = NR_CURSEG_DATA_TYPE;
}
/* walk the sibling logs in the chosen direction, skipping @type */
for (; cnt-- > 0; reversed ? i-- : i++) {
if (i == type)
continue;
if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
curseg->next_segno = segno;
return 1;
}
}
return 0;
}
/*
 * flush out current segment and replace it with new segment
 * This function should be returned with success, otherwise BUG
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
int type, bool force)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
if (force)
new_curseg(sbi, type, true);
/* keep the warm-node chain linear unless CRC recovery is supported */
else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
type == CURSEG_WARM_NODE)
new_curseg(sbi, type, false);
else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
new_curseg(sbi, type, false);
else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
change_curseg(sbi, type, true);
else
new_curseg(sbi, type, false);
stat_inc_seg_type(sbi, curseg);
}
/*
 * Force each data curseg onto a fresh section and refresh the dirty
 * status of the segments that were abandoned.
 */
void allocate_new_segments(struct f2fs_sb_info *sbi)
{
struct curseg_info *curseg;
unsigned int old_segno;
int i;
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
curseg = CURSEG_I(sbi, i);
old_segno = curseg->segno;
SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
locate_dirty_segment(sbi, old_segno);
}
}
/* default segment allocator: always allocate_segment_by_default() */
static const struct segment_allocation default_salloc_ops = {
.allocate_segment = allocate_segment_by_default,
};
/*
 * Probe the trim window [trim_start, trim_end] for any segment with
 * discardable blocks (check-only pass of add_discard_addrs), restoring
 * cpc->trim_start before returning.
 */
bool exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
__u64 trim_start = cpc->trim_start;
bool has_candidate = false;
mutex_lock(&SIT_I(sbi)->sentry_lock);
for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
if (add_discard_addrs(sbi, cpc, true)) {
has_candidate = true;
break;
}
}
mutex_unlock(&SIT_I(sbi)->sentry_lock);
cpc->trim_start = trim_start;
return has_candidate;
}
/*
 * FITRIM entry point: translate the byte range to segment numbers and
 * run batched CP_DISCARD checkpoints over it so discards are issued
 * safely. range->len is updated with the number of bytes trimmed.
 */
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
__u64 start = F2FS_BYTES_TO_BLK(range->start);
__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
unsigned int start_segno, end_segno;
struct cp_control cpc;
int err = 0;
if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
return -EINVAL;
cpc.trimmed = 0;
if (end <= MAIN_BLKADDR(sbi))
goto out;
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
f2fs_msg(sbi->sb, KERN_WARNING,
"Found FS corruption, run fsck to fix.");
goto out;
}
/* start/end segment number in main_area */
start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
GET_SEGNO(sbi, end);
cpc.reason = CP_DISCARD;
cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
/* do checkpoint to issue discard commands safely */
for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
cpc.trim_start = start_segno;
if (sbi->discard_blks == 0)
break;
else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
cpc.trim_end = end_segno;
else
/* batch: section-aligned chunks of BATCHED_TRIM_SEGMENTS */
cpc.trim_end = min_t(unsigned int,
rounddown(start_segno +
BATCHED_TRIM_SEGMENTS(sbi),
sbi->segs_per_sec) - 1, end_segno);
mutex_lock(&sbi->gc_mutex);
err = write_checkpoint(sbi, &cpc);
mutex_unlock(&sbi->gc_mutex);
if (err)
break;
schedule();
}
out:
range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
return err;
}
static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
if (curseg->next_blkoff < sbi->blocks_per_seg)
return true;
return false;
}
/* With two active logs, everything maps to the "hot" log of its kind. */
static int __get_segment_type_2(struct f2fs_io_info *fio)
{
    return (fio->type == DATA) ? CURSEG_HOT_DATA : CURSEG_HOT_NODE;
}
/* Four active logs: split data by directory/regular, nodes by dnode coldness. */
static int __get_segment_type_4(struct f2fs_io_info *fio)
{
    if (fio->type == DATA) {
        struct inode *inode = fio->page->mapping->host;

        /* directory data is hot, regular file data is cold */
        return S_ISDIR(inode->i_mode) ? CURSEG_HOT_DATA :
                        CURSEG_COLD_DATA;
    }

    if (IS_DNODE(fio->page) && is_cold_node(fio->page))
        return CURSEG_WARM_NODE;
    return CURSEG_COLD_NODE;
}
/* Six active logs: full hot/warm/cold split for both data and node blocks. */
static int __get_segment_type_6(struct f2fs_io_info *fio)
{
    if (fio->type != DATA) {
        if (!IS_DNODE(fio->page))
            return CURSEG_COLD_NODE;
        return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
                        CURSEG_HOT_NODE;
    } else {
        struct inode *inode = fio->page->mapping->host;

        if (is_cold_data(fio->page) || file_is_cold(inode))
            return CURSEG_COLD_DATA;
        if (is_inode_flag_set(inode, FI_HOT_DATA))
            return CURSEG_HOT_DATA;
        return CURSEG_WARM_DATA;
    }
}
/*
 * Pick the curseg type for @fio based on how many logs are active, and
 * record the matching temperature in fio->temp for write merging.
 */
static int __get_segment_type(struct f2fs_io_info *fio)
{
    int type = 0;

    switch (fio->sbi->active_logs) {
    case 2:
        type = __get_segment_type_2(fio);
        break;
    case 4:
        type = __get_segment_type_4(fio);
        break;
    case 6:
        type = __get_segment_type_6(fio);
        break;
    default:
        f2fs_bug_on(fio->sbi, true);
    }

    fio->temp = IS_HOT(type) ? HOT :
            IS_WARM(type) ? WARM : COLD;

    return type;
}
/*
 * Allocate the next free block in the curseg of @type, record @sum in the
 * current summary block, and return the new address via @new_blkaddr.
 * When @add_list is set, @fio is queued on the per-temperature write list.
 * Lock order here is curseg_mutex -> sentry_lock.
 */
void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
        block_t old_blkaddr, block_t *new_blkaddr,
        struct f2fs_summary *sum, int type,
        struct f2fs_io_info *fio, bool add_list)
{
    struct sit_info *sit_i = SIT_I(sbi);
    struct curseg_info *curseg = CURSEG_I(sbi, type);

    mutex_lock(&curseg->curseg_mutex);
    mutex_lock(&sit_i->sentry_lock);

    *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

    /* make sure any pending discard on this address has completed */
    f2fs_wait_discard_bio(sbi, *new_blkaddr);

    /*
     * __add_sum_entry should be resided under the curseg_mutex
     * because, this function updates a summary entry in the
     * current summary block.
     */
    __add_sum_entry(sbi, type, sum);

    __refresh_next_blkoff(sbi, curseg);

    stat_inc_block_count(sbi, curseg);

    /* open a new segment once the current one is exhausted */
    if (!__has_curseg_space(sbi, type))
        sit_i->s_ops->allocate_segment(sbi, type, false);
    /*
     * SIT information should be updated after segment allocation,
     * since we need to keep dirty segments precisely under SSR.
     */
    refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

    mutex_unlock(&sit_i->sentry_lock);

    if (page && IS_NODESEG(type))
        fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

    if (add_list) {
        struct f2fs_bio_info *io;

        INIT_LIST_HEAD(&fio->list);
        fio->in_list = true;
        io = sbi->write_io[fio->type] + fio->temp;
        spin_lock(&io->io_lock);
        list_add_tail(&fio->list, &io->io_list);
        spin_unlock(&io->io_lock);
    }

    mutex_unlock(&curseg->curseg_mutex);
}
/* Allocate a block for @fio and submit the write, retrying on -EAGAIN. */
static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
    int type = __get_segment_type(fio);

    for (;;) {
        allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
                &fio->new_blkaddr, sum, type, fio, true);

        /* writeout dirty page into bdev */
        if (f2fs_submit_page_write(fio) != -EAGAIN)
            break;

        /* retry: the freshly allocated block becomes the old one */
        fio->old_blkaddr = fio->new_blkaddr;
    }
}
/* Write one meta-area page synchronously, tagged as high-priority META I/O. */
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
    struct f2fs_io_info fio = {
        .sbi = sbi,
        .type = META,
        .op = REQ_OP_WRITE,
        .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
        .old_blkaddr = page->index,
        .new_blkaddr = page->index,
        .page = page,
        .encrypted_page = NULL,
        .in_list = false,
    };

    /* pages beyond the meta area lose the META request tag */
    if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
        fio.op_flags &= ~REQ_META;

    set_page_writeback(page);
    f2fs_submit_page_write(&fio);
}
void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
{
struct f2fs_summary sum;
set_summary(&sum, nid, 0, 0);
do_write_page(&sum, fio);
}
void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = fio->sbi;
struct f2fs_summary sum;
struct node_info ni;
f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
do_write_page(&sum, fio);
f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
}
/* In-place update path: rewrite the page at its existing on-disk address. */
int rewrite_data_page(struct f2fs_io_info *fio)
{
    fio->new_blkaddr = fio->old_blkaddr;
    stat_inc_inplace_blocks(fio->sbi);
    return f2fs_submit_page_bio(fio);
}
/*
 * Bind @new_blkaddr to the block described by @sum (used by recovery and
 * block relocation).  With @recover_curseg the curseg position is restored
 * afterwards; @recover_newaddr additionally accounts the new address.
 */
void __f2fs_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                block_t old_blkaddr, block_t new_blkaddr,
                bool recover_curseg, bool recover_newaddr)
{
    struct sit_info *sit_i = SIT_I(sbi);
    struct curseg_info *curseg;
    unsigned int segno, old_cursegno;
    struct seg_entry *se;
    int type;
    unsigned short old_blkoff;

    segno = GET_SEGNO(sbi, new_blkaddr);
    se = get_seg_entry(sbi, segno);
    type = se->type;

    if (!recover_curseg) {
        /* for recovery flow */
        if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
            if (old_blkaddr == NULL_ADDR)
                type = CURSEG_COLD_DATA;
            else
                type = CURSEG_WARM_DATA;
        }
    } else {
        if (!IS_CURSEG(sbi, segno))
            type = CURSEG_WARM_DATA;
    }

    curseg = CURSEG_I(sbi, type);

    mutex_lock(&curseg->curseg_mutex);
    mutex_lock(&sit_i->sentry_lock);

    /* remember where the curseg was so it can be put back below */
    old_cursegno = curseg->segno;
    old_blkoff = curseg->next_blkoff;

    /* change the current segment */
    if (segno != curseg->segno) {
        curseg->next_segno = segno;
        change_curseg(sbi, type, true);
    }

    curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
    __add_sum_entry(sbi, type, sum);

    if (!recover_curseg || recover_newaddr)
        update_sit_entry(sbi, new_blkaddr, 1);
    if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
        update_sit_entry(sbi, old_blkaddr, -1);

    locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
    locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));

    locate_dirty_segment(sbi, old_cursegno);

    if (recover_curseg) {
        if (old_cursegno != curseg->segno) {
            curseg->next_segno = old_cursegno;
            change_curseg(sbi, type, true);
        }
        curseg->next_blkoff = old_blkoff;
    }

    mutex_unlock(&sit_i->sentry_lock);
    mutex_unlock(&curseg->curseg_mutex);
}
/* Convenience wrapper: replace a data block and update the dnode address. */
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
                block_t old_addr, block_t new_addr,
                unsigned char version, bool recover_curseg,
                bool recover_newaddr)
{
    struct f2fs_summary summary;

    set_summary(&summary, dn->nid, dn->ofs_in_node, version);
    __f2fs_replace_block(sbi, &summary, old_addr, new_addr,
                recover_curseg, recover_newaddr);

    f2fs_update_data_blkaddr(dn, new_addr);
}
/*
 * Wait for a page under writeback: first kick any merged bio that still
 * holds the page, then either wait fully (@ordered) or just for stability.
 */
void f2fs_wait_on_page_writeback(struct page *page,
                enum page_type type, bool ordered)
{
    struct f2fs_sb_info *sbi;

    if (!PageWriteback(page))
        return;

    sbi = F2FS_P_SB(page);
    f2fs_submit_merged_write_cond(sbi, page->mapping->host,
                        0, page->index, type);
    if (ordered)
        wait_on_page_writeback(page);
    else
        wait_for_stable_page(page);
}
/* Wait for writeback of the encrypted shadow page cached at @blkaddr, if any. */
void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
                        block_t blkaddr)
{
    struct page *cpage;

    if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
        return;

    cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
    if (!cpage)
        return;

    f2fs_wait_on_page_writeback(cpage, DATA, true);
    f2fs_put_page(cpage, 1);
}
/*
 * Restore the three data cursegs (plus NAT/SIT journals) from the compacted
 * summary layout produced by write_compacted_summaries().
 */
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
    struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
    struct curseg_info *seg_i;
    unsigned char *kaddr;
    struct page *page;
    block_t start;
    int i, j, offset;

    start = start_sum_block(sbi);

    page = get_meta_page(sbi, start++);
    kaddr = (unsigned char *)page_address(page);

    /* Step 1: restore nat cache */
    seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
    memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);

    /* Step 2: restore sit cache */
    seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
    memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
    offset = 2 * SUM_JOURNAL_SIZE;

    /* Step 3: restore summary entries */
    for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
        unsigned short blk_off;
        unsigned int segno;

        seg_i = CURSEG_I(sbi, i);
        segno = le32_to_cpu(ckpt->cur_data_segno[i]);
        blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
        seg_i->next_segno = segno;
        reset_curseg(sbi, i, 0);
        seg_i->alloc_type = ckpt->alloc_type[i];
        seg_i->next_blkoff = blk_off;

        /* SSR segments carry a full segment's worth of entries */
        if (seg_i->alloc_type == SSR)
            blk_off = sbi->blocks_per_seg;

        for (j = 0; j < blk_off; j++) {
            struct f2fs_summary *s;
            s = (struct f2fs_summary *)(kaddr + offset);
            seg_i->sum_blk->entries[j] = *s;
            offset += SUMMARY_SIZE;
            if (offset + SUMMARY_SIZE <= PAGE_SIZE -
                        SUM_FOOTER_SIZE)
                continue;

            /* would run into the footer area: move to next page */
            f2fs_put_page(page, 1);
            page = NULL;

            page = get_meta_page(sbi, start++);
            kaddr = (unsigned char *)page_address(page);
            offset = 0;
        }
    }
    f2fs_put_page(page, 1);
    return 0;
}
/*
 * Restore one curseg (@type) from its full summary block.  Node summaries
 * may have to be rebuilt by scanning the segment when no on-disk copy
 * exists (per __exist_node_summaries()).
 */
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
    struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
    struct f2fs_summary_block *sum;
    struct curseg_info *curseg;
    struct page *new;
    unsigned short blk_off;
    unsigned int segno = 0;
    block_t blk_addr = 0;

    /* get segment number and block addr */
    if (IS_DATASEG(type)) {
        segno = le32_to_cpu(ckpt->cur_data_segno[type]);
        blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
                            CURSEG_HOT_DATA]);
        if (__exist_node_summaries(sbi))
            blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
        else
            blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
    } else {
        segno = le32_to_cpu(ckpt->cur_node_segno[type -
                            CURSEG_HOT_NODE]);
        blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
                            CURSEG_HOT_NODE]);
        if (__exist_node_summaries(sbi))
            blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
                            type - CURSEG_HOT_NODE);
        else
            blk_addr = GET_SUM_BLOCK(sbi, segno);
    }

    new = get_meta_page(sbi, blk_addr);
    sum = (struct f2fs_summary_block *)page_address(new);

    if (IS_NODESEG(type)) {
        if (__exist_node_summaries(sbi)) {
            /* node summaries only need the nid: clear the extras */
            struct f2fs_summary *ns = &sum->entries[0];
            int i;
            for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
                ns->version = 0;
                ns->ofs_in_node = 0;
            }
        } else {
            int err;

            err = restore_node_summary(sbi, segno, sum);
            if (err) {
                f2fs_put_page(new, 1);
                return err;
            }
        }
    }

    /* set uncompleted segment to curseg */
    curseg = CURSEG_I(sbi, type);
    mutex_lock(&curseg->curseg_mutex);

    /* update journal info */
    down_write(&curseg->journal_rwsem);
    memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
    up_write(&curseg->journal_rwsem);

    memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
    memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
    curseg->next_segno = segno;
    reset_curseg(sbi, type, 0);
    curseg->alloc_type = ckpt->alloc_type[type];
    curseg->next_blkoff = blk_off;
    mutex_unlock(&curseg->curseg_mutex);
    f2fs_put_page(new, 1);
    return 0;
}
/*
 * Restore every curseg's summary block and journal at mount time:
 * compacted data summaries first (if flagged), then normal summaries.
 */
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
    int type = CURSEG_HOT_DATA;
    int err;

    if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
        int npages = npages_for_summary_flush(sbi, true);

        if (npages >= 2)
            ra_meta_pages(sbi, start_sum_block(sbi), npages,
                            META_CP, true);

        /* restore for compacted data summary */
        if (read_compacted_summaries(sbi))
            return -EINVAL;
        /* data cursegs are done; continue with node cursegs only */
        type = CURSEG_HOT_NODE;
    }

    if (__exist_node_summaries(sbi))
        ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
                    NR_CURSEG_TYPE - type, META_CP, true);

    for (; type <= CURSEG_COLD_NODE; type++) {
        err = read_normal_summaries(sbi, type);
        if (err)
            return err;
    }
    return 0;
}
/*
 * Pack the three data cursegs' journals and summary entries into as few
 * meta pages as possible, starting at @blkaddr.
 */
static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
    struct page *page;
    unsigned char *kaddr;
    struct f2fs_summary *summary;
    struct curseg_info *seg_i;
    int written_size = 0;
    int i, j;

    page = grab_meta_page(sbi, blkaddr++);
    kaddr = (unsigned char *)page_address(page);

    /* Step 1: write nat cache */
    seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
    memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
    written_size += SUM_JOURNAL_SIZE;

    /* Step 2: write sit cache */
    seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
    memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
    written_size += SUM_JOURNAL_SIZE;

    /* Step 3: write summary entries */
    for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
        unsigned short blkoff;

        seg_i = CURSEG_I(sbi, i);
        /* SSR segments flush a full segment's worth of entries */
        if (sbi->ckpt->alloc_type[i] == SSR)
            blkoff = sbi->blocks_per_seg;
        else
            blkoff = curseg_blkoff(sbi, i);

        for (j = 0; j < blkoff; j++) {
            if (!page) {
                page = grab_meta_page(sbi, blkaddr++);
                kaddr = (unsigned char *)page_address(page);
                written_size = 0;
            }
            summary = (struct f2fs_summary *)(kaddr + written_size);
            *summary = seg_i->sum_blk->entries[j];
            written_size += SUMMARY_SIZE;

            if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
                        SUM_FOOTER_SIZE)
                continue;

            /* page full (minus footer): flush and start a new one */
            set_page_dirty(page);
            f2fs_put_page(page, 1);
            page = NULL;
        }
    }
    if (page) {
        set_page_dirty(page);
        f2fs_put_page(page, 1);
    }
}
/*
 * Flush the full summary pages of all cursegs of the kind selected by
 * @type (data or node), into consecutive blocks starting at @blkaddr.
 */
static void write_normal_summaries(struct f2fs_sb_info *sbi,
                    block_t blkaddr, int type)
{
    int nr = IS_DATASEG(type) ? NR_CURSEG_DATA_TYPE : NR_CURSEG_NODE_TYPE;
    int i;

    for (i = 0; i < nr; i++)
        write_current_sum_page(sbi, type + i, blkaddr + i);
}
/* Flush the data-curseg summaries, compacted or full as per the checkpoint. */
void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
    if (!is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
        write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
        return;
    }
    write_compacted_summaries(sbi, start_blk);
}
/* Flush the three node cursegs' full summary blocks starting at @start_blk. */
void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
    write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}
/*
 * Find the journal slot for @val (a nid or segno depending on @type).
 * Returns its index, a freshly allocated slot if @alloc and space remains,
 * or -1 on failure.
 */
int lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
                    unsigned int val, int alloc)
{
    int i;

    if (type == NAT_JOURNAL) {
        for (i = 0; i < nats_in_cursum(journal); i++) {
            if (le32_to_cpu(nid_in_journal(journal, i)) == val)
                return i;
        }
        if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
            return update_nats_in_cursum(journal, 1);
        return -1;
    }

    if (type == SIT_JOURNAL) {
        for (i = 0; i < sits_in_cursum(journal); i++) {
            if (le32_to_cpu(segno_in_journal(journal, i)) == val)
                return i;
        }
        if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
            return update_sits_in_cursum(journal, 1);
    }
    return -1;
}
/* Read the currently valid SIT block page that covers @segno. */
static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
                    unsigned int segno)
{
    return get_meta_page(sbi, current_sit_addr(sbi, segno));
}
/*
 * Copy the live SIT block covering @start into its shadow location, flip
 * the SIT bitmap to the shadow, and return the (dirty, locked) new page.
 */
static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
                    unsigned int start)
{
    struct sit_info *sit_i = SIT_I(sbi);
    pgoff_t cur_off = current_sit_addr(sbi, start);
    pgoff_t next_off = next_sit_addr(sbi, cur_off);
    struct page *cur_page, *next_page;

    /* get current sit block page without lock */
    cur_page = get_meta_page(sbi, cur_off);
    next_page = grab_meta_page(sbi, next_off);
    f2fs_bug_on(sbi, PageDirty(cur_page));

    memcpy(page_address(next_page), page_address(cur_page), PAGE_SIZE);

    set_page_dirty(next_page);
    f2fs_put_page(cur_page, 1);

    set_to_next_sit(sit_i, start);
    return next_page;
}
/* Allocate and zero-initialize a sit_entry_set from its slab cache. */
static struct sit_entry_set *grab_sit_entry_set(void)
{
    struct sit_entry_set *set;

    set = f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
    set->entry_cnt = 0;
    INIT_LIST_HEAD(&set->set_list);
    return set;
}
/* Unlink @ses from its set list and return it to the slab cache. */
static void release_sit_entry_set(struct sit_entry_set *ses)
{
    list_del(&ses->set_list);
    kmem_cache_free(sit_entry_set_slab, ses);
}
/*
 * Re-position @ses after its entry_cnt grew so the list stays ordered by
 * ascending entry count: advance past every following set with a smaller
 * count and re-insert @ses just before the stopping point.
 */
static void adjust_sit_entry_set(struct sit_entry_set *ses,
                        struct list_head *head)
{
    struct sit_entry_set *next = ses;

    if (list_is_last(&ses->set_list, head))
        return;

    list_for_each_entry_continue(next, head, set_list)
        if (ses->entry_cnt <= next->entry_cnt)
            break;

    /* NOTE(review): if no larger set exists, 'next' wraps to the list
     * head container, so @ses lands at the tail — confirm intended. */
    list_move_tail(&ses->set_list, &next->set_list);
}
/*
 * Account one dirty SIT entry for @segno in the per-SIT-block entry set,
 * creating the set if this is the first entry for that block.
 */
static void add_sit_entry(unsigned int segno, struct list_head *head)
{
    unsigned int start_segno = START_SEGNO(segno);
    struct sit_entry_set *ses;

    list_for_each_entry(ses, head, set_list) {
        if (ses->start_segno != start_segno)
            continue;
        ses->entry_cnt++;
        adjust_sit_entry_set(ses, head);
        return;
    }

    /* no existing set covers this SIT block: open a fresh one */
    ses = grab_sit_entry_set();
    ses->start_segno = start_segno;
    ses->entry_cnt++;
    list_add(&ses->set_list, head);
}
/* Account every dirty SIT entry from the bitmap in the entry-set list. */
static void add_sits_in_set(struct f2fs_sb_info *sbi)
{
    struct list_head *set_list = &SM_I(sbi)->sit_entry_set;
    unsigned long *dirty_map = SIT_I(sbi)->dirty_sentries_bitmap;
    unsigned int segno;

    for_each_set_bit(segno, dirty_map, MAIN_SEGS(sbi))
        add_sit_entry(segno, set_list);
}
/*
 * Drain all SIT entries out of the cold-data curseg journal, marking each
 * segment dirty and accounting it in the entry sets where needed.
 */
static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
    struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
    struct f2fs_journal *journal = curseg->journal;
    int i;

    down_write(&curseg->journal_rwsem);
    for (i = 0; i < sits_in_cursum(journal); i++) {
        unsigned int segno;
        bool dirtied;

        segno = le32_to_cpu(segno_in_journal(journal, i));
        dirtied = __mark_sit_entry_dirty(sbi, segno);

        /* account the segment in the sets when not already counted */
        if (!dirtied)
            add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
    }
    /* i is the number of journal entries just consumed */
    update_sits_in_cursum(journal, -i);
    up_write(&curseg->journal_rwsem);
}
/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
    struct sit_info *sit_i = SIT_I(sbi);
    unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
    struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
    struct f2fs_journal *journal = curseg->journal;
    struct sit_entry_set *ses, *tmp;
    struct list_head *head = &SM_I(sbi)->sit_entry_set;
    bool to_journal = true;
    struct seg_entry *se;

    mutex_lock(&sit_i->sentry_lock);

    if (!sit_i->dirty_sentries)
        goto out;

    /*
     * add and account sit entries of dirty bitmap in sit entry
     * set temporarily
     */
    add_sits_in_set(sbi);

    /*
     * if there are no enough space in journal to store dirty sit
     * entries, remove all entries from journal and add and account
     * them in sit entry set.
     */
    if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL))
        remove_sits_in_journal(sbi);

    /*
     * there are two steps to flush sit entries:
     * #1, flush sit entries to journal in current cold data summary block.
     * #2, flush sit entries to sit page.
     */
    list_for_each_entry_safe(ses, tmp, head, set_list) {
        struct page *page = NULL;
        struct f2fs_sit_block *raw_sit = NULL;
        unsigned int start_segno = ses->start_segno;
        unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
                    (unsigned long)MAIN_SEGS(sbi));
        unsigned int segno = start_segno;

        /* once the journal runs out of room, fall back to SIT pages */
        if (to_journal &&
            !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
            to_journal = false;

        if (to_journal) {
            down_write(&curseg->journal_rwsem);
        } else {
            page = get_next_sit_page(sbi, start_segno);
            raw_sit = page_address(page);
        }

        /* flush dirty sit entries in region of current sit set */
        for_each_set_bit_from(segno, bitmap, end) {
            int offset, sit_offset;

            se = get_seg_entry(sbi, segno);

            /* add discard candidates */
            if (!(cpc->reason & CP_DISCARD)) {
                cpc->trim_start = segno;
                add_discard_addrs(sbi, cpc, false);
            }

            if (to_journal) {
                offset = lookup_journal_in_cursum(journal,
                            SIT_JOURNAL, segno, 1);
                f2fs_bug_on(sbi, offset < 0);
                segno_in_journal(journal, offset) =
                            cpu_to_le32(segno);
                seg_info_to_raw_sit(se,
                        &sit_in_journal(journal, offset));
            } else {
                sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
                seg_info_to_raw_sit(se,
                        &raw_sit->entries[sit_offset]);
            }

            __clear_bit(segno, bitmap);
            sit_i->dirty_sentries--;
            ses->entry_cnt--;
        }

        if (to_journal)
            up_write(&curseg->journal_rwsem);
        else
            f2fs_put_page(page, 1);

        /* every entry of this set must have been flushed above */
        f2fs_bug_on(sbi, ses->entry_cnt);
        release_sit_entry_set(ses);
    }

    f2fs_bug_on(sbi, !list_empty(head));
    f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
    if (cpc->reason & CP_DISCARD) {
        __u64 trim_start = cpc->trim_start;

        /* collect discard candidates over the trim window */
        for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
            add_discard_addrs(sbi, cpc, false);

        cpc->trim_start = trim_start;
    }
    mutex_unlock(&sit_i->sentry_lock);

    set_prefree_as_free_segments(sbi);
}
/*
 * Allocate and initialize the in-memory SIT: per-segment entries, validity
 * bitmaps, optional section entries and the checkpoint's SIT bitmap copy.
 * On -ENOMEM the caller is expected to unwind via destroy_sit_info().
 */
static int build_sit_info(struct f2fs_sb_info *sbi)
{
    struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
    struct sit_info *sit_i;
    unsigned int sit_segs, start;
    char *src_bitmap;
    unsigned int bitmap_size;

    /* allocate memory for SIT information */
    sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
    if (!sit_i)
        return -ENOMEM;

    SM_I(sbi)->sit_info = sit_i;

    sit_i->sentries = f2fs_kvzalloc(MAIN_SEGS(sbi) *
                    sizeof(struct seg_entry), GFP_KERNEL);
    if (!sit_i->sentries)
        return -ENOMEM;

    bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
    sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
    if (!sit_i->dirty_sentries_bitmap)
        return -ENOMEM;

    /* per-segment validity maps (current, checkpointed, discard) */
    for (start = 0; start < MAIN_SEGS(sbi); start++) {
        sit_i->sentries[start].cur_valid_map
            = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
        sit_i->sentries[start].ckpt_valid_map
            = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
        if (!sit_i->sentries[start].cur_valid_map ||
                !sit_i->sentries[start].ckpt_valid_map)
            return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
        sit_i->sentries[start].cur_valid_map_mir
            = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
        if (!sit_i->sentries[start].cur_valid_map_mir)
            return -ENOMEM;
#endif

        if (f2fs_discard_en(sbi)) {
            sit_i->sentries[start].discard_map
                = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
            if (!sit_i->sentries[start].discard_map)
                return -ENOMEM;
        }
    }

    sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
    if (!sit_i->tmp_map)
        return -ENOMEM;

    /* section-level accounting only needed with multi-segment sections */
    if (sbi->segs_per_sec > 1) {
        sit_i->sec_entries = f2fs_kvzalloc(MAIN_SECS(sbi) *
                    sizeof(struct sec_entry), GFP_KERNEL);
        if (!sit_i->sec_entries)
            return -ENOMEM;
    }

    /* get information related with SIT */
    sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

    /* setup SIT bitmap from ckeckpoint pack */
    bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
    src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

    sit_i->sit_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
    if (!sit_i->sit_bitmap)
        return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
    sit_i->sit_bitmap_mir = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
    if (!sit_i->sit_bitmap_mir)
        return -ENOMEM;
#endif

    /* init SIT information */
    sit_i->s_ops = &default_salloc_ops;

    sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
    sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
    sit_i->written_valid_blocks = 0;
    sit_i->bitmap_size = bitmap_size;
    sit_i->dirty_sentries = 0;
    sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
    sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
    sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
    mutex_init(&sit_i->sentry_lock);
    return 0;
}
/*
 * Allocate the free segment/section bitmaps.  Both start "all dirty";
 * init_free_segmap() later clears the bits of truly free segments.
 */
static int build_free_segmap(struct f2fs_sb_info *sbi)
{
    struct free_segmap_info *free_i;
    unsigned int bitmap_size, sec_bitmap_size;

    /* allocate memory for free segmap information */
    free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
    if (!free_i)
        return -ENOMEM;

    SM_I(sbi)->free_info = free_i;

    bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
    free_i->free_segmap = f2fs_kvmalloc(bitmap_size, GFP_KERNEL);
    if (!free_i->free_segmap)
        return -ENOMEM;

    sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
    free_i->free_secmap = f2fs_kvmalloc(sec_bitmap_size, GFP_KERNEL);
    if (!free_i->free_secmap)
        return -ENOMEM;

    /* set all segments as dirty temporarily */
    memset(free_i->free_segmap, 0xff, bitmap_size);
    memset(free_i->free_secmap, 0xff, sec_bitmap_size);

    /* init free segmap information */
    free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
    free_i->free_segments = 0;
    free_i->free_sections = 0;
    spin_lock_init(&free_i->segmap_lock);
    return 0;
}
/*
 * Allocate the curseg array (one per log) with its summary block and
 * journal buffers, then restore their contents from the checkpoint.
 */
static int build_curseg(struct f2fs_sb_info *sbi)
{
    struct curseg_info *array;
    int i;

    array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
    if (!array)
        return -ENOMEM;

    SM_I(sbi)->curseg_array = array;

    for (i = 0; i < NR_CURSEG_TYPE; i++) {
        mutex_init(&array[i].curseg_mutex);
        array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!array[i].sum_blk)
            return -ENOMEM;
        init_rwsem(&array[i].journal_rwsem);
        array[i].journal = kzalloc(sizeof(struct f2fs_journal),
                            GFP_KERNEL);
        if (!array[i].journal)
            return -ENOMEM;
        array[i].segno = NULL_SEGNO;
        array[i].next_blkoff = 0;
    }
    return restore_curseg_summaries(sbi);
}
/*
 * Populate the in-memory SIT entries from the on-disk SIT blocks, then
 * overlay the (more recent) entries cached in the curseg SIT journal.
 * Also builds the initial discard map and section block counts.
 */
static void build_sit_entries(struct f2fs_sb_info *sbi)
{
    struct sit_info *sit_i = SIT_I(sbi);
    struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
    struct f2fs_journal *journal = curseg->journal;
    struct seg_entry *se;
    struct f2fs_sit_entry sit;
    int sit_blk_cnt = SIT_BLK_CNT(sbi);
    unsigned int i, start, end;
    unsigned int readed, start_blk = 0;

    do {
        /* read ahead a batch of SIT meta pages, then walk them */
        readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
                            META_SIT, true);

        start = start_blk * sit_i->sents_per_block;
        end = (start_blk + readed) * sit_i->sents_per_block;

        for (; start < end && start < MAIN_SEGS(sbi); start++) {
            struct f2fs_sit_block *sit_blk;
            struct page *page;

            se = &sit_i->sentries[start];
            page = get_current_sit_page(sbi, start);
            sit_blk = (struct f2fs_sit_block *)page_address(page);
            sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
            f2fs_put_page(page, 1);

            check_block_count(sbi, start, &sit);
            seg_info_from_raw_sit(se, &sit);

            /* build discard map only one time */
            if (f2fs_discard_en(sbi)) {
                if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
                    memset(se->discard_map, 0xff,
                        SIT_VBLOCK_MAP_SIZE);
                } else {
                    memcpy(se->discard_map,
                        se->cur_valid_map,
                        SIT_VBLOCK_MAP_SIZE);
                    sbi->discard_blks +=
                        sbi->blocks_per_seg -
                        se->valid_blocks;
                }
            }

            if (sbi->segs_per_sec > 1)
                get_sec_entry(sbi, start)->valid_blocks +=
                            se->valid_blocks;
        }
        start_blk += readed;
    } while (start_blk < sit_blk_cnt);

    /* journal entries are newer than the SIT blocks: apply them last */
    down_read(&curseg->journal_rwsem);
    for (i = 0; i < sits_in_cursum(journal); i++) {
        unsigned int old_valid_blocks;

        start = le32_to_cpu(segno_in_journal(journal, i));
        se = &sit_i->sentries[start];
        sit = sit_in_journal(journal, i);

        old_valid_blocks = se->valid_blocks;

        check_block_count(sbi, start, &sit);
        seg_info_from_raw_sit(se, &sit);

        if (f2fs_discard_en(sbi)) {
            if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
                memset(se->discard_map, 0xff,
                            SIT_VBLOCK_MAP_SIZE);
            } else {
                memcpy(se->discard_map, se->cur_valid_map,
                            SIT_VBLOCK_MAP_SIZE);
                sbi->discard_blks += old_valid_blocks -
                            se->valid_blocks;
            }
        }

        if (sbi->segs_per_sec > 1)
            get_sec_entry(sbi, start)->valid_blocks +=
                se->valid_blocks - old_valid_blocks;
    }
    up_read(&curseg->journal_rwsem);
}
/*
 * Mark segments with no valid blocks as free, tally written valid blocks,
 * and reserve the segments currently used by the cursegs.
 */
static void init_free_segmap(struct f2fs_sb_info *sbi)
{
    unsigned int start;
    int type;

    for (start = 0; start < MAIN_SEGS(sbi); start++) {
        struct seg_entry *sentry = get_seg_entry(sbi, start);
        if (!sentry->valid_blocks)
            __set_free(sbi, start);
        else
            SIT_I(sbi)->written_valid_blocks +=
                        sentry->valid_blocks;
    }

    /* set use the current segments */
    for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
        struct curseg_info *curseg_t = CURSEG_I(sbi, type);
        __set_test_and_inuse(sbi, curseg_t->segno);
    }
}
/*
 * Walk all in-use segments and classify the partially valid ones as DIRTY
 * (fully valid and fully empty segments are skipped).
 */
static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
    struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
    struct free_segmap_info *free_i = FREE_I(sbi);
    unsigned int segno = 0, offset = 0;
    unsigned short valid_blocks;

    while (1) {
        /* find dirty segment based on free segmap */
        segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
        if (segno >= MAIN_SEGS(sbi))
            break;
        offset = segno + 1;
        valid_blocks = get_valid_blocks(sbi, segno, false);
        if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
            continue;
        if (valid_blocks > sbi->blocks_per_seg) {
            /* corrupted count: complain but keep going */
            f2fs_bug_on(sbi, 1);
            continue;
        }
        mutex_lock(&dirty_i->seglist_lock);
        __locate_dirty_segment(sbi, segno, DIRTY);
        mutex_unlock(&dirty_i->seglist_lock);
    }
}
/* Allocate the zeroed GC victim-section bitmap. */
static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
    unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
    struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

    dirty_i->victim_secmap = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
    return dirty_i->victim_secmap ? 0 : -ENOMEM;
}
/*
 * Allocate the per-type dirty segment bitmaps, seed them from the SIT,
 * and set up the GC victim bitmap.
 */
static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
    struct dirty_seglist_info *dirty_i;
    unsigned int bitmap_size, i;

    /* allocate memory for dirty segments list information */
    dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
    if (!dirty_i)
        return -ENOMEM;

    SM_I(sbi)->dirty_info = dirty_i;
    mutex_init(&dirty_i->seglist_lock);

    bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));

    for (i = 0; i < NR_DIRTY_TYPE; i++) {
        dirty_i->dirty_segmap[i] = f2fs_kvzalloc(bitmap_size, GFP_KERNEL);
        if (!dirty_i->dirty_segmap[i])
            return -ENOMEM;
    }

    init_dirty_segmap(sbi);
    return init_victim_secmap(sbi);
}
/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
    struct sit_info *sit_i = SIT_I(sbi);
    unsigned int segno;

    mutex_lock(&sit_i->sentry_lock);

    sit_i->min_mtime = LLONG_MAX;

    /* minimum is taken over the per-section average mtime */
    for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
        unsigned int i;
        unsigned long long mtime = 0;

        for (i = 0; i < sbi->segs_per_sec; i++)
            mtime += get_seg_entry(sbi, segno + i)->mtime;

        mtime = div_u64(mtime, sbi->segs_per_sec);

        if (sit_i->min_mtime > mtime)
            sit_i->min_mtime = mtime;
    }
    sit_i->max_mtime = get_mtime(sbi);
    mutex_unlock(&sit_i->sentry_lock);
}
/*
 * Mount-time construction of the whole segment manager: sm_info fields
 * from superblock/checkpoint, flush/discard threads, SIT, free/dirty
 * segmaps and cursegs.  On error the caller unwinds via
 * destroy_segment_manager().
 */
int build_segment_manager(struct f2fs_sb_info *sbi)
{
    struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
    struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
    struct f2fs_sm_info *sm_info;
    int err;

    sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
    if (!sm_info)
        return -ENOMEM;

    /* init sm info */
    sbi->sm_info = sm_info;
    sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
    sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
    sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
    sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
    sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
    sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
    sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
    sm_info->rec_prefree_segments = sm_info->main_segments *
                    DEF_RECLAIM_PREFREE_SEGMENTS / 100;
    if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
        sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;

    /* in-place update policy applies only outside LFS mode */
    if (!test_opt(sbi, LFS))
        sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
    sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
    sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
    sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;

    sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;

    INIT_LIST_HEAD(&sm_info->sit_entry_set);

    if (!f2fs_readonly(sbi->sb)) {
        err = create_flush_cmd_control(sbi);
        if (err)
            return err;
    }

    err = create_discard_cmd_control(sbi);
    if (err)
        return err;

    err = build_sit_info(sbi);
    if (err)
        return err;
    err = build_free_segmap(sbi);
    if (err)
        return err;
    err = build_curseg(sbi);
    if (err)
        return err;

    /* reinit free segmap based on SIT */
    build_sit_entries(sbi);

    init_free_segmap(sbi);
    err = build_dirty_segmap(sbi);
    if (err)
        return err;

    init_min_max_mtime(sbi);
    return 0;
}
/* Free one dirty-type bitmap and zero its counter, under the seglist lock. */
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
                    enum dirty_type dirty_type)
{
    struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

    mutex_lock(&dirty_i->seglist_lock);
    kvfree(dirty_i->dirty_segmap[dirty_type]);
    dirty_i->nr_dirty[dirty_type] = 0;
    mutex_unlock(&dirty_i->seglist_lock);
}
static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
kvfree(dirty_i->victim_secmap);
}
/* Tear down the dirty seglist info: bitmaps, victim map, then the struct. */
static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
    struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
    int type;

    if (!dirty_i)
        return;

    /* discard pre-free/dirty segments list */
    for (type = 0; type < NR_DIRTY_TYPE; type++)
        discard_dirty_segmap(sbi, type);

    destroy_victim_secmap(sbi);
    SM_I(sbi)->dirty_info = NULL;
    kfree(dirty_i);
}
/* Release every curseg's summary block and journal, then the array. */
static void destroy_curseg(struct f2fs_sb_info *sbi)
{
    struct curseg_info *array = SM_I(sbi)->curseg_array;
    int i;

    if (!array)
        return;
    SM_I(sbi)->curseg_array = NULL;
    for (i = NR_CURSEG_TYPE - 1; i >= 0; i--) {
        kfree(array[i].journal);
        kfree(array[i].sum_blk);
    }
    kfree(array);
}
static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
struct free_segmap_info *free_i = SM_I(sbi)->free_info;
if (!free_i)
return;
SM_I(sbi)->free_info = NULL;
kvfree(free_i->free_segmap);
kvfree(free_i->free_secmap);
kfree(free_i);
}
/* Free everything allocated by build_sit_info(), tolerating partial setup. */
static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
    struct sit_info *sit_i = SIT_I(sbi);
    unsigned int start;

    if (!sit_i)
        return;

    /* per-segment validity maps */
    if (sit_i->sentries) {
        for (start = 0; start < MAIN_SEGS(sbi); start++) {
            kfree(sit_i->sentries[start].cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
            kfree(sit_i->sentries[start].cur_valid_map_mir);
#endif
            kfree(sit_i->sentries[start].ckpt_valid_map);
            kfree(sit_i->sentries[start].discard_map);
        }
    }
    kfree(sit_i->tmp_map);

    kvfree(sit_i->sentries);
    kvfree(sit_i->sec_entries);
    kvfree(sit_i->dirty_sentries_bitmap);

    SM_I(sbi)->sit_info = NULL;
    kfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
    kfree(sit_i->sit_bitmap_mir);
#endif
    kfree(sit_i);
}
/* Unmount-time teardown of the segment manager, reverse of build order. */
void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
    struct f2fs_sm_info *sm_info = SM_I(sbi);

    if (!sm_info)
        return;
    destroy_flush_cmd_control(sbi, true);
    destroy_discard_cmd_control(sbi);
    destroy_dirty_segmap(sbi);
    destroy_curseg(sbi);
    destroy_free_segmap(sbi);
    destroy_sit_info(sbi);
    sbi->sm_info = NULL;
    kfree(sm_info);
}
/*
 * Create the slab caches used by the segment manager.
 *
 * On failure, caches created so far are destroyed in reverse order by
 * the goto unwind chain below (each label tears down exactly the caches
 * created before the failing step).
 *
 * Returns 0 on success, -ENOMEM if any cache cannot be created.
 */
int __init create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	discard_cmd_slab = f2fs_kmem_cache_create("discard_cmd",
			sizeof(struct discard_cmd));
	if (!discard_cmd_slab)
		goto destroy_discard_entry;

	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_cmd;

	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
			sizeof(struct inmem_pages));
	if (!inmem_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
	kmem_cache_destroy(discard_cmd_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}
/* Destroy every slab cache created by create_segment_manager_caches(). */
void destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_cmd_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(inmem_entry_slab);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_3024_0 |
crossvul-cpp_data_good_659_0 | /*
* Broadcom UniMAC MDIO bus controller driver
*
* Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_mdio.h>
#include <linux/platform_data/mdio-bcm-unimac.h>
#define MDIO_CMD 0x00
#define MDIO_START_BUSY (1 << 29)
#define MDIO_READ_FAIL (1 << 28)
#define MDIO_RD (2 << 26)
#define MDIO_WR (1 << 26)
#define MDIO_PMD_SHIFT 21
#define MDIO_PMD_MASK 0x1F
#define MDIO_REG_SHIFT 16
#define MDIO_REG_MASK 0x1F
#define MDIO_CFG 0x04
#define MDIO_C22 (1 << 0)
#define MDIO_C45 0
#define MDIO_CLK_DIV_SHIFT 4
#define MDIO_CLK_DIV_MASK 0x3F
#define MDIO_SUPP_PREAMBLE (1 << 12)
struct unimac_mdio_priv {
struct mii_bus *mii_bus;
void __iomem *base;
int (*wait_func) (void *wait_func_data);
void *wait_func_data;
};
/* Read a 32-bit MDIO block register at @offset. */
static inline u32 unimac_mdio_readl(struct unimac_mdio_priv *priv, u32 offset)
{
	/* MIPS chips strapped for BE will automagically configure the
	 * peripheral registers for CPU-native byte order.
	 */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(priv->base + offset);
	else
		return readl_relaxed(priv->base + offset);
}
/* Write @val to the 32-bit MDIO block register at @offset.
 * Mirrors unimac_mdio_readl(): raw access on big-endian MIPS, relaxed
 * MMIO everywhere else.
 */
static inline void unimac_mdio_writel(struct unimac_mdio_priv *priv, u32 val,
				      u32 offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(val, priv->base + offset);
	else
		writel_relaxed(val, priv->base + offset);
}
/* Kick off the previously programmed MDIO transaction by setting the
 * START_BUSY bit in the command register.
 */
static inline void unimac_mdio_start(struct unimac_mdio_priv *priv)
{
	u32 cmd_reg = unimac_mdio_readl(priv, MDIO_CMD);

	unimac_mdio_writel(priv, cmd_reg | MDIO_START_BUSY, MDIO_CMD);
}
/* Non-zero while a transaction is still in flight (START_BUSY set). */
static inline unsigned int unimac_mdio_busy(struct unimac_mdio_priv *priv)
{
	return unimac_mdio_readl(priv, MDIO_CMD) & MDIO_START_BUSY;
}
/*
 * Default wait callback: poll the controller until the current MDIO
 * transaction completes.
 *
 * Returns 0 on completion, -ETIMEDOUT after ~1000 polls of 1-2ms each.
 *
 * Note: the original tail code ("if (!timeout) return -ETIMEDOUT;
 * return 0;") was redundant — the do/while loop only falls through when
 * timeout has reached zero, so the final "return 0" was dead code.
 */
static int unimac_mdio_poll(void *wait_func_data)
{
	struct unimac_mdio_priv *priv = wait_func_data;
	unsigned int timeout = 1000;

	do {
		if (!unimac_mdio_busy(priv))
			return 0;

		usleep_range(1000, 2000);
	} while (--timeout);

	return -ETIMEDOUT;
}
/*
 * Perform a single MDIO read from @phy_id/@reg.
 * Returns the 16-bit register value, -EIO when the controller flags a
 * read failure (unless the PHY is in phy_ignore_ta_mask), or a negative
 * error from the wait callback.
 */
static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct unimac_mdio_priv *priv = bus->priv;
	int ret;
	u32 cmd;

	/* Prepare the read operation */
	cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
	unimac_mdio_writel(priv, cmd, MDIO_CMD);

	/* Start MDIO transaction */
	unimac_mdio_start(priv);

	ret = priv->wait_func(priv->wait_func_data);
	if (ret)
		return ret;

	cmd = unimac_mdio_readl(priv, MDIO_CMD);

	/* Some broken devices are known not to release the line during
	 * turn-around, e.g: Broadcom BCM53125 external switches, so check for
	 * that condition here and ignore the MDIO controller read failure
	 * indication.
	 */
	if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL))
		return -EIO;

	/* Data is returned in the low 16 bits of the command register. */
	return cmd & 0xffff;
}
/*
 * Perform a single MDIO write of @val to @phy_id/@reg.
 * Returns 0 on success or a negative error from the wait callback.
 */
static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
			     int reg, u16 val)
{
	struct unimac_mdio_priv *priv = bus->priv;
	u32 cmd;

	/* Program the write command: opcode, PHY address, register, data. */
	cmd = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
	      (reg << MDIO_REG_SHIFT) | (0xffff & val);
	unimac_mdio_writel(priv, cmd, MDIO_CMD);

	/* Fire the transaction and wait for it to finish. */
	unimac_mdio_start(priv);
	return priv->wait_func(priv->wait_func_data);
}
/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with
* their internal MDIO management controller making them fail to successfully
* be read from or written to for the first transaction. We insert a dummy
* BMSR read here to make sure that phy_get_device() and get_phy_id() can
* correctly read the PHY MII_PHYSID1/2 registers and successfully register a
* PHY device for this peripheral.
*
* Once the PHY driver is registered, we can workaround subsequent reads from
* there (e.g: during system-wide power management).
*
* bus->reset is invoked before mdiobus_scan during mdiobus_register and is
* therefore the right location to stick that workaround. Since we do not want
* to read from non-existing PHYs, we either use bus->phy_mask or do a manual
* Device Tree scan to limit the search area.
*/
static int unimac_mdio_reset(struct mii_bus *bus)
{
	struct device_node *np = bus->dev.of_node;
	struct device_node *child;
	u32 read_mask = 0;
	int addr;

	/* Without a DT node, probe every address not masked off;
	 * with one, only probe addresses that have a child node. */
	if (!np) {
		read_mask = ~bus->phy_mask;
	} else {
		for_each_available_child_of_node(np, child) {
			addr = of_mdio_parse_addr(&bus->dev, child);
			if (addr < 0)
				continue;

			read_mask |= 1 << addr;
		}
	}

	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		if (read_mask & 1 << addr) {
			dev_dbg(&bus->dev, "Workaround for PHY @ %d\n", addr);
			/* Dummy BMSR read; result intentionally ignored
			 * (see the workaround comment above). */
			mdiobus_read(bus, addr, MII_BMSR);
		}
	}

	return 0;
}
/*
 * Platform probe: map the MDIO register window, allocate and configure
 * an mii_bus (from platform data when present, defaults otherwise) and
 * register it.  priv and the register mapping are devm-managed; the
 * mii_bus is allocated manually and must be freed on the error path.
 */
static int unimac_mdio_probe(struct platform_device *pdev)
{
	struct unimac_mdio_pdata *pdata = pdev->dev.platform_data;
	struct unimac_mdio_priv *priv;
	struct device_node *np;
	struct mii_bus *bus;
	struct resource *r;
	int ret;

	np = pdev->dev.of_node;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -EINVAL;

	/* Just ioremap, as this MDIO block is usually integrated into an
	 * Ethernet MAC controller register range
	 */
	priv->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!priv->base) {
		dev_err(&pdev->dev, "failed to remap register\n");
		return -ENOMEM;
	}

	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus)
		return -ENOMEM;

	bus = priv->mii_bus;
	bus->priv = priv;
	/* Platform data may override the bus name, completion-wait
	 * callback and PHY mask; otherwise fall back to polling. */
	if (pdata) {
		bus->name = pdata->bus_name;
		priv->wait_func = pdata->wait_func;
		priv->wait_func_data = pdata->wait_func_data;
		bus->phy_mask = ~pdata->phy_mask;
	} else {
		bus->name = "unimac MII bus";
		priv->wait_func_data = priv;
		priv->wait_func = unimac_mdio_poll;
	}
	bus->parent = &pdev->dev;
	bus->read = unimac_mdio_read;
	bus->write = unimac_mdio_write;
	bus->reset = unimac_mdio_reset;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);

	ret = of_mdiobus_register(bus, np);
	if (ret) {
		dev_err(&pdev->dev, "MDIO bus registration failed\n");
		goto out_mdio_free;
	}

	platform_set_drvdata(pdev, priv);

	dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus at 0x%p\n", priv->base);

	return 0;

out_mdio_free:
	mdiobus_free(bus);
	return ret;
}
/* Platform remove: unregister and free the MII bus; everything else is
 * devm-managed and released automatically.
 */
static int unimac_mdio_remove(struct platform_device *pdev)
{
	struct unimac_mdio_priv *priv = platform_get_drvdata(pdev);
	struct mii_bus *bus = priv->mii_bus;

	mdiobus_unregister(bus);
	mdiobus_free(bus);

	return 0;
}
/* Device-tree compatibles handled by this driver (GENET v1-v5 and the
 * standalone UniMAC MDIO block). */
static const struct of_device_id unimac_mdio_ids[] = {
	{ .compatible = "brcm,genet-mdio-v5", },
	{ .compatible = "brcm,genet-mdio-v4", },
	{ .compatible = "brcm,genet-mdio-v3", },
	{ .compatible = "brcm,genet-mdio-v2", },
	{ .compatible = "brcm,genet-mdio-v1", },
	{ .compatible = "brcm,unimac-mdio", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, unimac_mdio_ids);

static struct platform_driver unimac_mdio_driver = {
	.driver = {
		.name = UNIMAC_MDIO_DRV_NAME,
		.of_match_table = unimac_mdio_ids,
	},
	.probe	= unimac_mdio_probe,
	.remove	= unimac_mdio_remove,
};
module_platform_driver(unimac_mdio_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom UniMAC MDIO bus controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" UNIMAC_MDIO_DRV_NAME);
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_659_0 |
crossvul-cpp_data_bad_573_0 | /*
* Copyright (c) 2007 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
#include "rds.h"
/*
* XXX
* - build with sparse
* - should we detect duplicate keys on a socket? hmm.
* - an rdma is an mlock, apply rlimit?
*/
/*
* get the number of pages by looking at the page indices that the start and
* end addresses fall in.
*
* Returns 0 if the vec is invalid. It is invalid if the number of bytes
* causes the address to wrap or overflows an unsigned int. This comes
* from being stored in the 'length' member of 'struct scatterlist'.
*/
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
if ((vec->addr + vec->bytes <= vec->addr) ||
(vec->bytes > (u64)UINT_MAX))
return 0;
return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
(vec->addr >> PAGE_SHIFT);
}
/*
 * Look up an MR by r_key in the socket's rbtree.  If @insert is
 * non-NULL and no node with @key exists, link @insert into the tree and
 * take a reference on it.  Returns the existing MR for @key, or NULL
 * when the key was absent (in which case @insert, if given, is now in
 * the tree).  All callers in this file hold rs_rdma_lock.
 */
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
				       struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		/* The tree holds its own reference on the MR. */
		refcount_inc(&insert->r_refcount);
	}
	return NULL;
}
/*
* Destroy the transport-specific part of a MR.
*/
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
		 mr->r_key, refcount_read(&mr->r_refcount));

	/* RDS_MR_DEAD makes teardown idempotent: only the first caller
	 * proceeds past this point. */
	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	/* Steal the transport private pointer under the lock... */
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* ...and release the transport-side resources outside it. */
	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}
/* Called when the last reference is dropped: tear down transport state
 * and free the MR itself. */
void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}
/*
* By the time this is called we can't have any more ioctls called on
* the socket so we don't need to worry about racing with others.
*/
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	/* Release any MRs associated with this socket */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = rb_entry(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		/* Drop the lock around destroy/put: free_mr() may sleep
		 * and must not run with the spinlock held. */
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		rds_destroy_mr(mr);
		rds_mr_put(mr);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}
/*
* Helper function to pin user pages.
*/
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			 struct page **pages, int write)
{
	int ret;

	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

	/* A partial pin counts as failure: drop whatever was pinned so
	 * callers never see a half-populated pages[] array. */
	if (ret >= 0 && ret < nr_pages) {
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}

	return ret;
}
/*
 * Map a user buffer for RDMA: pin its pages, build a scatterlist, hand
 * it to the transport's get_mr() (which then owns the sg list), insert
 * the resulting MR into the socket's rbtree and hand the <R_Key,offset>
 * cookie back to the caller and/or userspace.
 *
 * On any failure the out: path drops this function's reference on the
 * (possibly partially built) MR, which frees it via __rds_put_mr_final.
 */
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
			  u64 *cookie_ret, struct rds_mr **mr_ret)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	/* An unbound socket has no transport to register the MR with. */
	if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Restrict the size of mr irrespective of underlying transport
	 * To account for unaligned mr regions, subtract one from nr_pages
	 */
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}
	WARN_ON(!nents);
	sg_init_table(sg, nents);

	/* Stick all pages into the scatterlist */
	for (i = 0 ; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);

	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key);

	if (IS_ERR(trans_private)) {
		/* get_mr() failed, so we still own the sg list: drop the
		 * page references and free it here. */
		for (i = 0 ; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
	       mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		refcount_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	if (mr)
		rds_mr_put(mr);
	return ret;
}
/* RDS_GET_MR setsockopt: copy the user's arguments and map the region. */
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL);
}
/* RDS_GET_MR_FOR_DEST setsockopt: translate the per-destination request
 * into an ordinary get_mr request and map the region. */
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *       and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL);
}
/*
* Free the MR indicated by the given R_Key
*/
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * call rds_destroy_mr() ourselves so that we're sure it's done by the time
	 * we return.  If we let rds_mr_put() do it it might not happen until
	 * someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}
/*
* This is called when we receive an extension header that
* tells us this MR was used. It allows us to implement
* use_once semantics
*/
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
			 r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	/* use_once MRs (and forced unuse) are detached from the tree here
	 * and destroyed below, implementing the use-once semantics. */
	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was a RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me) {
		rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}
/* Release all pages, the notifier, and the active flag of an rdma op. */
void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->op_nents; i++) {
		struct page *page = sg_page(&ro->op_sg[i]);

		/* Mark page dirty if it was possibly modified, which
		 * is the case for a RDMA_READ which copies from remote
		 * to local memory */
		if (!ro->op_write) {
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
		}
		put_page(page);
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
}
/* Release the single pinned page and notifier of an atomic op.
 *
 * NOTE(review): sg_page(ao->op_sg) is dereferenced unconditionally, so
 * this must only run for ops whose op_sg was fully set up (i.e.
 * op_active must be cleared on rds_cmsg_atomic() error paths before the
 * message is torn down) — verify all callers uphold this.
 */
void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark page dirty if it was possibly modified, which
	 * is the case for a RDMA_READ which copies from remote
	 * to local memory */
	set_page_dirty(page);
	put_page(page);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}
/*
* Count the number of pages needed to describe an incoming iovec array.
*/
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int i;

	/* Sum the page counts of all entries, rejecting invalid vectors. */
	for (i = 0; i < nr_iovecs; i++) {
		unsigned int nr_pages = rds_pages_in_vec(&iov[i]);

		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}
/*
 * Size (in bytes of scatterlist entries) needed to back the user's
 * iovec array.  Returns -EINVAL for an empty or invalid vector,
 * -EFAULT if the iovec cannot be copied from userspace.
 */
int rds_rdma_extra_size(struct rds_rdma_args *args)
{
	struct rds_iovec vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	if (args->nr_local == 0)
		return -EINVAL;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec)))
			return -EFAULT;

		nr_pages = rds_pages_in_vec(&vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}
/*
* The application asks for a RDMA transfer.
* Extract all arguments and set up the rdma_op
*/
/*
 * Parse an RDS_CMSG_RDMA_ARGS control message and populate rm->rdma:
 * copy the user's iovec array, pin the described pages, and build the
 * op's scatterlist.  On failure the op is torn down via
 * rds_rdma_free_op() before returning.
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
	int iov_size;
	unsigned int i, j;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	/* An empty iovec cannot describe a transfer.  Rejecting it here
	 * keeps us consistent with rds_rdma_extra_size() (which returns
	 * -EINVAL for nr_local == 0) and prevents marking a zero-entry op
	 * active whose scatterlist would never be populated.
	 */
	if (args->nr_local == 0) {
		ret = -EINVAL;
		goto out_ret;
	}

	/* Check whether to allocate the iovec area */
	iov_size = args->nr_local * sizeof(struct rds_iovec);
	if (args->nr_local > UIO_FASTIOV) {
		iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
		if (!iovs) {
			ret = -ENOMEM;
			goto out_ret;
		}
	}

	if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
		ret = -EFAULT;
		goto out;
	}

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
	if (!op->op_sg) {
		ret = -ENOMEM;
		goto out;
	}

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;

		/* Enable rmda notification on data operation for composite
		 * rds messages and make sure notification is enabled only
		 * for the data operation which follows it so that application
		 * gets notified only after full message gets delivered.
		 */
		if (rm->data.op_sg) {
			rm->rdma.op_notify = 0;
			rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
		}
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
		 (unsigned long long)args->nr_local,
		 (unsigned long long)args->remote_vec.addr,
		 op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if (ret < 0)
			goto out;
		else
			ret = 0;

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
					min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
					offset);

			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
			       sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
				nr_bytes,
				(unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out;
	}
	op->op_bytes = nr_bytes;

out:
	if (iovs != iovstack)
		sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
	kfree(pages);
out_ret:
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}
/*
* The application wants us to pass an RDMA destination (aka MR)
* to the remote
*/
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	/* Reject undersized payloads and a message that already carries
	 * an rdma cookie. */
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		refcount_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		/* The message now holds a reference on the MR. */
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}
/*
* The application passes us an address range it wants to enable RDMA
* to/from. We map the area, and save the <R_Key,offset> pair
* in rm->m_rdma_cookie. This causes it to be sent along to the peer
* in an extension header.
*/
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	/* Reject undersized payloads and a message already carrying
	 * an rdma cookie; otherwise delegate to __rds_rdma_map(). */
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
}
/*
* Fill in rds_message for an atomic request.
*/
/*
 * Parse an atomic-op control message and fill in rm->atomic.
 *
 * Fix (CVE-2018-5333 class NULL dereference): op_active is set before
 * op_sg is allocated and the page is pinned.  If either step fails, the
 * err: path must clear op_active again — otherwise message teardown
 * calls rds_atomic_free_op(), which dereferences sg_page(ao->op_sg)
 * unconditionally while op_sg may still be NULL.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	    || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	/* Nonmasked & masked cmsg ops converted to masked hw ops */
	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG(); /* should never happen */
	}

	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
	if (!rm->atomic.op_sg) {
		ret = -ENOMEM;
		goto err;
	}

	/* verify 8 byte-aligned */
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		put_page(page);
	/* The op never became usable: clear op_active so message teardown
	 * does not call rds_atomic_free_op() on a NULL/partial op_sg. */
	rm->atomic.op_active = 0;
	kfree(rm->atomic.op_notifier);

	return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_573_0 |
crossvul-cpp_data_bad_3380_0 | /* Copyright JS Foundation and other contributors, http://js.foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ecma-alloc.h"
#include "ecma-helpers.h"
#include "ecma-function-object.h"
#include "ecma-literal-storage.h"
#include "js-parser-internal.h"
#include "lit-char-helpers.h"
#if JERRY_JS_PARSER
/** \addtogroup parser Parser
* @{
*
* \addtogroup jsparser JavaScript
* @{
*
* \addtogroup jsparser_lexer Lexer
* @{
*/
/* True when the byte is a UTF-8 intermediate (continuation) octet. */
#define IS_UTF8_INTERMEDIATE_OCTET(byte) (((byte) & LIT_UTF8_EXTRA_BYTE_MASK) == LIT_UTF8_2_BYTE_CODE_POINT_MIN)
/**
 * Align column to the next tab position.
 *
 * Tab stops are every 8 columns.
 *
 * @return aligned position
 */
static parser_line_counter_t
align_column_to_tab (parser_line_counter_t column) /**< current column */
{
  /* Tab aligns to zero column start position. Round up to a multiple of
   * the tab size (8) and step to the first column of the next stop.
   * Note: the previous code masked with ECMA_STRING_CONTAINER_MASK, an
   * unrelated string-storage macro that only coincidentally had the
   * required value; use the tab size explicitly instead. */
  return (parser_line_counter_t) (((column + (8u - 1u)) & ~(8u - 1u)) + 1u);
} /* align_column_to_tab */
/**
 * Parse hexadecimal character sequence
 *
 * Raises a parser error when a non-hexadecimal digit is found.
 *
 * @return character value
 */
ecma_char_t
lexer_hex_to_character (parser_context_t *context_p, /**< context */
                        const uint8_t *source_p, /**< current source position */
                        int length) /**< source length */
{
  uint32_t value = 0;

  do
  {
    uint32_t digit = *source_p++;

    value <<= 4;

    if (digit >= LIT_CHAR_0 && digit <= LIT_CHAR_9)
    {
      value += digit - LIT_CHAR_0;
      continue;
    }

    digit = LEXER_TO_ASCII_LOWERCASE (digit);

    if (digit < LIT_CHAR_LOWERCASE_A || digit > LIT_CHAR_LOWERCASE_F)
    {
      parser_raise_error (context_p, PARSER_ERR_INVALID_ESCAPE_SEQUENCE);
    }

    value += digit - (LIT_CHAR_LOWERCASE_A - 10);
  }
  while (--length > 0);

  return (ecma_char_t) value;
} /* lexer_hex_to_character */
/**
 * Skip space mode
 *
 * State of the whitespace scanner: plain whitespace skipping, or
 * consuming one of the two comment forms.
 */
typedef enum
{
  LEXER_SKIP_SPACES, /**< skip spaces mode */
  LEXER_SKIP_SINGLE_LINE_COMMENT, /**< parse single line comment */
  LEXER_SKIP_MULTI_LINE_COMMENT, /**< parse multi line comment */
} skip_mode_t;
/**
 * Skip spaces.
 *
 * Advances context_p->source_p over whitespace, line terminators and
 * comments, updating the line/column counters and setting
 * token.was_newline when a line terminator was crossed.
 */
static void
skip_spaces (parser_context_t *context_p) /**< context */
{
  skip_mode_t mode = LEXER_SKIP_SPACES;
  const uint8_t *source_end_p = context_p->source_end_p;

  context_p->token.was_newline = 0;

  while (true)
  {
    if (context_p->source_p >= source_end_p)
    {
      /* An unterminated multi line comment is a syntax error. */
      if (mode == LEXER_SKIP_MULTI_LINE_COMMENT)
      {
        parser_raise_error (context_p, PARSER_ERR_UNTERMINATED_MULTILINE_COMMENT);
      }
      return;
    }

    switch (context_p->source_p[0])
    {
      case LIT_CHAR_CR:
      {
        /* A CR LF pair counts as a single line terminator. */
        if (context_p->source_p + 1 < source_end_p
            && context_p->source_p[1] == LIT_CHAR_LF)
        {
          context_p->source_p++;
        }
        /* FALLTHRU */
      }
      case LIT_CHAR_LF:
      {
        context_p->line++;
        context_p->column = 0;
        context_p->token.was_newline = 1;

        /* A newline terminates a single line comment. */
        if (mode == LEXER_SKIP_SINGLE_LINE_COMMENT)
        {
          mode = LEXER_SKIP_SPACES;
        }
        /* FALLTHRU */
      }
      case LIT_CHAR_VTAB:
      case LIT_CHAR_FF:
      case LIT_CHAR_SP:
      {
        context_p->source_p++;
        context_p->column++;
        continue;
      }
      case LIT_CHAR_TAB:
      {
        context_p->column = align_column_to_tab (context_p->column);
        context_p->source_p++;
        continue;
      }
      case LIT_CHAR_SLASH:
      {
        /* Possible start of a single or multi line comment. */
        if (mode == LEXER_SKIP_SPACES
            && context_p->source_p + 1 < source_end_p)
        {
          if (context_p->source_p[1] == LIT_CHAR_SLASH)
          {
            mode = LEXER_SKIP_SINGLE_LINE_COMMENT;
          }
          else if (context_p->source_p[1] == LIT_CHAR_ASTERISK)
          {
            mode = LEXER_SKIP_MULTI_LINE_COMMENT;
            /* Remember the start position for the error message of an
             * unterminated comment. */
            context_p->token.line = context_p->line;
            context_p->token.column = context_p->column;
          }

          if (mode != LEXER_SKIP_SPACES)
          {
            context_p->source_p += 2;
            PARSER_PLUS_EQUAL_LC (context_p->column, 2);
            continue;
          }
        }
        break;
      }
      case LIT_CHAR_ASTERISK:
      {
        /* Possible end of a multi line comment. */
        if (mode == LEXER_SKIP_MULTI_LINE_COMMENT
            && context_p->source_p + 1 < source_end_p
            && context_p->source_p[1] == LIT_CHAR_SLASH)
        {
          mode = LEXER_SKIP_SPACES;
          context_p->source_p += 2;
          PARSER_PLUS_EQUAL_LC (context_p->column, 2);
          continue;
        }
        break;
      }
      case 0xc2:
      {
        /* Non-breaking space: codepoint \u00A0 (UTF-8: C2 A0). */
        if (context_p->source_p + 1 < source_end_p
            && context_p->source_p[1] == 0xa0)
        {
          context_p->source_p += 2;
          context_p->column++;
          continue;
        }
        break;
      }
      case LEXER_NEWLINE_LS_PS_BYTE_1:
      {
        /* NOTE(review): bounds are only enforced by an assertion here; if
         * assertions are compiled out, a source buffer ending in this byte
         * would be read past its end — confirm inputs are validated
         * upstream. */
        JERRY_ASSERT (context_p->source_p + 2 < source_end_p);

        if (LEXER_NEWLINE_LS_PS_BYTE_23 (context_p->source_p))
        {
          /* Line separators: codepoints \u2028 and \u2029. */
          context_p->source_p += 3;
          context_p->line++;
          context_p->column = 1;
          context_p->token.was_newline = 1;

          if (mode == LEXER_SKIP_SINGLE_LINE_COMMENT)
          {
            mode = LEXER_SKIP_SPACES;
          }
          continue;
        }
        break;
      }
      case 0xef:
      {
        /* Byte order mark: codepoint \uFEFF (UTF-8: EF BB BF). */
        if (context_p->source_p + 2 < source_end_p
            && context_p->source_p[1] == 0xbb
            && context_p->source_p[2] == 0xbf)
        {
          context_p->source_p += 3;
          context_p->column++;
          continue;
        }
        break;
      }
      default:
      {
        break;
      }
    }

    /* Not a whitespace character: stop unless inside a comment. */
    if (mode == LEXER_SKIP_SPACES)
    {
      return;
    }

    /* Consume one byte of comment text. */
    context_p->source_p++;

    /* NOTE(review): column is increased only when the NEXT byte is a UTF-8
     * continuation octet, which looks inverted (it counts the trailing
     * bytes of a multi-byte character instead of its start) — confirm
     * against the upstream implementation. */
    if (context_p->source_p < source_end_p
        && IS_UTF8_INTERMEDIATE_OCTET (context_p->source_p[0]))
    {
      context_p->column++;
    }
  }
} /* skip_spaces */
/**
 * Keyword data.
 */
typedef struct
{
  const uint8_t *keyword_p; /**< keyword string */
  lexer_token_type_t type; /**< keyword token type */
} keyword_string_t;

/* Helper macros for building the keyword tables below; every table is
 * terminated by a LEXER_KEYWORD_END () entry. */
#define LEXER_KEYWORD(name, type) { (const uint8_t *) (name), (type) }
#define LEXER_KEYWORD_END() { (const uint8_t *) NULL, LEXER_EOS }
/**
 * Keywords with 2 characters.
 */
static const keyword_string_t keyword_length_2[4] =
{
  LEXER_KEYWORD ("do", LEXER_KEYW_DO),
  LEXER_KEYWORD ("if", LEXER_KEYW_IF),
  LEXER_KEYWORD ("in", LEXER_KEYW_IN),
  LEXER_KEYWORD_END ()
};

/**
 * Keywords with 3 characters.
 */
static const keyword_string_t keyword_length_3[6] =
{
  LEXER_KEYWORD ("for", LEXER_KEYW_FOR),
  LEXER_KEYWORD ("let", LEXER_KEYW_LET),
  LEXER_KEYWORD ("new", LEXER_KEYW_NEW),
  LEXER_KEYWORD ("try", LEXER_KEYW_TRY),
  LEXER_KEYWORD ("var", LEXER_KEYW_VAR),
  LEXER_KEYWORD_END ()
};

/**
 * Keywords with 4 characters.
 */
static const keyword_string_t keyword_length_4[9] =
{
  LEXER_KEYWORD ("case", LEXER_KEYW_CASE),
  LEXER_KEYWORD ("else", LEXER_KEYW_ELSE),
  LEXER_KEYWORD ("enum", LEXER_KEYW_ENUM),
  LEXER_KEYWORD ("null", LEXER_LIT_NULL),
  LEXER_KEYWORD ("this", LEXER_KEYW_THIS),
  LEXER_KEYWORD ("true", LEXER_LIT_TRUE),
  LEXER_KEYWORD ("void", LEXER_KEYW_VOID),
  LEXER_KEYWORD ("with", LEXER_KEYW_WITH),
  LEXER_KEYWORD_END ()
};

/**
 * Keywords with 5 characters.
 */
static const keyword_string_t keyword_length_5[10] =
{
  LEXER_KEYWORD ("break", LEXER_KEYW_BREAK),
  LEXER_KEYWORD ("catch", LEXER_KEYW_CATCH),
  LEXER_KEYWORD ("class", LEXER_KEYW_CLASS),
  LEXER_KEYWORD ("const", LEXER_KEYW_CONST),
  LEXER_KEYWORD ("false", LEXER_LIT_FALSE),
  LEXER_KEYWORD ("super", LEXER_KEYW_SUPER),
  LEXER_KEYWORD ("throw", LEXER_KEYW_THROW),
  LEXER_KEYWORD ("while", LEXER_KEYW_WHILE),
  LEXER_KEYWORD ("yield", LEXER_KEYW_YIELD),
  LEXER_KEYWORD_END ()
};

/**
 * Keywords with 6 characters.
 */
static const keyword_string_t keyword_length_6[9] =
{
  LEXER_KEYWORD ("delete", LEXER_KEYW_DELETE),
  LEXER_KEYWORD ("export", LEXER_KEYW_EXPORT),
  LEXER_KEYWORD ("import", LEXER_KEYW_IMPORT),
  LEXER_KEYWORD ("public", LEXER_KEYW_PUBLIC),
  LEXER_KEYWORD ("return", LEXER_KEYW_RETURN),
  LEXER_KEYWORD ("static", LEXER_KEYW_STATIC),
  LEXER_KEYWORD ("switch", LEXER_KEYW_SWITCH),
  LEXER_KEYWORD ("typeof", LEXER_KEYW_TYPEOF),
  LEXER_KEYWORD_END ()
};

/**
 * Keywords with 7 characters.
 */
static const keyword_string_t keyword_length_7[6] =
{
  LEXER_KEYWORD ("default", LEXER_KEYW_DEFAULT),
  LEXER_KEYWORD ("extends", LEXER_KEYW_EXTENDS),
  LEXER_KEYWORD ("finally", LEXER_KEYW_FINALLY),
  LEXER_KEYWORD ("package", LEXER_KEYW_PACKAGE),
  LEXER_KEYWORD ("private", LEXER_KEYW_PRIVATE),
  LEXER_KEYWORD_END ()
};

/**
 * Keywords with 8 characters.
 */
static const keyword_string_t keyword_length_8[4] =
{
  LEXER_KEYWORD ("continue", LEXER_KEYW_CONTINUE),
  LEXER_KEYWORD ("debugger", LEXER_KEYW_DEBUGGER),
  LEXER_KEYWORD ("function", LEXER_KEYW_FUNCTION),
  LEXER_KEYWORD_END ()
};

/**
 * Keywords with 9 characters.
 */
static const keyword_string_t keyword_length_9[3] =
{
  LEXER_KEYWORD ("interface", LEXER_KEYW_INTERFACE),
  LEXER_KEYWORD ("protected", LEXER_KEYW_PROTECTED),
  LEXER_KEYWORD_END ()
};

/**
 * Keywords with 10 characters.
 */
static const keyword_string_t keyword_length_10[3] =
{
  LEXER_KEYWORD ("implements", LEXER_KEYW_IMPLEMENTS),
  LEXER_KEYWORD ("instanceof", LEXER_KEYW_INSTANCEOF),
  LEXER_KEYWORD_END ()
};

/**
 * List to the keywords.
 *
 * Indexed by (keyword length - 2); see the keyword lookup in
 * lexer_parse_identifier ().
 */
static const keyword_string_t * const keyword_string_list[9] =
{
  keyword_length_2,
  keyword_length_3,
  keyword_length_4,
  keyword_length_5,
  keyword_length_6,
  keyword_length_7,
  keyword_length_8,
  keyword_length_9,
  keyword_length_10
};

#undef LEXER_KEYWORD
#undef LEXER_KEYWORD_END
/**
 * Parse identifier.
 *
 * Scans an identifier (possibly containing \uXXXX escape sequences)
 * starting at context_p->source_p, fills context_p->token, and when
 * check_keywords is set and no escape was seen, converts reserved
 * words into their keyword token types.
 */
static void
lexer_parse_identifier (parser_context_t *context_p, /**< context */
                        bool check_keywords) /**< check keywords */
{
  /* Only very few identifiers contains \u escape sequences. */
  const uint8_t *source_p = context_p->source_p;
  const uint8_t *ident_start_p = context_p->source_p;
  /* Note: newline or tab cannot be part of an identifier. */
  parser_line_counter_t column = context_p->column;
  const uint8_t *source_end_p = context_p->source_end_p;
  size_t length = 0;

  context_p->token.type = LEXER_LITERAL;
  context_p->token.literal_is_reserved = false;
  context_p->token.lit_location.type = LEXER_IDENT_LITERAL;
  context_p->token.lit_location.has_escape = false;

  do
  {
    if (*source_p == LIT_CHAR_BACKSLASH)
    {
      uint16_t character;

      context_p->token.lit_location.has_escape = true;
      context_p->source_p = source_p;
      context_p->token.column = column;

      /* Only \uXXXX escapes are valid inside identifiers. */
      if ((source_p + 6 > source_end_p) || (source_p[1] != LIT_CHAR_LOWERCASE_U))
      {
        parser_raise_error (context_p, PARSER_ERR_INVALID_UNICODE_ESCAPE_SEQUENCE);
      }

      character = lexer_hex_to_character (context_p, source_p + 2, 4);

      /* The first character must be a valid identifier start, the rest
       * must be valid identifier parts. */
      if (length == 0)
      {
        if (!lit_char_is_identifier_start_character (character))
        {
          parser_raise_error (context_p, PARSER_ERR_INVALID_IDENTIFIER_START);
        }
      }
      else
      {
        if (!lit_char_is_identifier_part_character (character))
        {
          parser_raise_error (context_p, PARSER_ERR_INVALID_IDENTIFIER_PART);
        }
      }

      /* The length counts the UTF-8 bytes of the decoded character. */
      length += lit_char_get_utf8_length (character);
      source_p += 6;
      PARSER_PLUS_EQUAL_LC (column, 6);
      continue;
    }

    /* Valid identifiers cannot contain 4 byte long utf-8
     * characters, since those characters are represented
     * by 2 ecmascript (UTF-16) characters, and those
     * characters cannot be literal characters. */
    JERRY_ASSERT (source_p[0] < LEXER_UTF8_4BYTE_START);

    source_p++;
    length++;
    column++;

    /* Consume the continuation bytes of a multi-byte character. */
    while (source_p < source_end_p
           && IS_UTF8_INTERMEDIATE_OCTET (source_p[0]))
    {
      source_p++;
      length++;
    }
  }
  while (source_p < source_end_p
         && (lit_char_is_identifier_part (source_p) || *source_p == LIT_CHAR_BACKSLASH));

  context_p->source_p = ident_start_p;
  context_p->token.column = context_p->column;

  if (length > PARSER_MAXIMUM_IDENT_LENGTH)
  {
    parser_raise_error (context_p, PARSER_ERR_IDENTIFIER_TOO_LONG);
  }

  /* Check keywords (Only if there is no \u escape sequence in the pattern). */
  if (check_keywords
      && !context_p->token.lit_location.has_escape
      && (length >= 2 && length <= 10))
  {
    /* Tables are indexed by keyword length minus two. */
    const keyword_string_t *keyword_p = keyword_string_list[length - 2];

    do
    {
      /* Compare the first two bytes before the full memcmp as a fast
       * rejection test. */
      if (ident_start_p[0] == keyword_p->keyword_p[0]
          && ident_start_p[1] == keyword_p->keyword_p[1]
          && memcmp (ident_start_p, keyword_p->keyword_p, length) == 0)
      {
        if (keyword_p->type >= LEXER_FIRST_FUTURE_STRICT_RESERVED_WORD)
        {
          /* Future reserved words are only rejected in strict mode. */
          if (context_p->status_flags & PARSER_IS_STRICT)
          {
            parser_raise_error (context_p, PARSER_ERR_STRICT_IDENT_NOT_ALLOWED);
          }

          context_p->token.literal_is_reserved = true;
          break;
        }

        context_p->token.type = keyword_p->type;
        break;
      }
      keyword_p++;
    }
    while (keyword_p->type != LEXER_EOS);
  }

  if (context_p->token.type == LEXER_LITERAL)
  {
    /* Fill literal data. */
    context_p->token.lit_location.char_p = ident_start_p;
    context_p->token.lit_location.length = (uint16_t) length;
  }

  context_p->source_p = source_p;
  context_p->column = column;
} /* lexer_parse_identifier */
/**
 * Parse string.
 *
 * Scans a single or double quoted string literal and fills
 * context_p->token with its location and decoded byte length.
 * Escape sequences are not resolved here, only their decoded length
 * is computed; see lexer_construct_literal_object ().
 */
static void
lexer_parse_string (parser_context_t *context_p) /**< context */
{
  /* The opening quote character also terminates the string. */
  uint8_t str_end_character = context_p->source_p[0];
  const uint8_t *source_p = context_p->source_p + 1;
  const uint8_t *string_start_p = source_p;
  const uint8_t *source_end_p = context_p->source_end_p;
  parser_line_counter_t line = context_p->line;
  parser_line_counter_t column = (parser_line_counter_t) (context_p->column + 1);
  parser_line_counter_t original_line = line;
  parser_line_counter_t original_column = column;
  size_t length = 0;
  uint8_t has_escape = false;

  while (true)
  {
    if (source_p >= source_end_p)
    {
      /* Report the error at the position of the opening quote. */
      context_p->token.line = original_line;
      context_p->token.column = (parser_line_counter_t) (original_column - 1);
      parser_raise_error (context_p, PARSER_ERR_UNTERMINATED_STRING);
    }

    if (*source_p == str_end_character)
    {
      break;
    }

    if (*source_p == LIT_CHAR_BACKSLASH)
    {
      source_p++;
      column++;
      if (source_p >= source_end_p)
      {
        /* Will throw an unterminated string error. */
        continue;
      }

      has_escape = true;

      /* Newline is ignored. */
      if (*source_p == LIT_CHAR_CR
          || *source_p == LIT_CHAR_LF
          || (*source_p == LEXER_NEWLINE_LS_PS_BYTE_1 && LEXER_NEWLINE_LS_PS_BYTE_23 (source_p)))
      {
        if (*source_p == LIT_CHAR_CR)
        {
          source_p++;
          /* A CR LF pair is a single line terminator. */
          if (source_p < source_end_p
              && *source_p == LIT_CHAR_LF)
          {
            source_p++;
          }
        }
        else if (*source_p == LIT_CHAR_LF)
        {
          source_p++;
        }
        else
        {
          /* Three byte long \u2028 or \u2029 sequence. */
          source_p += 3;
        }

        line++;
        column = 1;
        continue;
      }

      /* Except \x, \u, and octal numbers, everything is
       * converted to a character which has the same byte length. */
      if (*source_p >= LIT_CHAR_0 && *source_p <= LIT_CHAR_3)
      {
        /* Octal escape starting with 0..3: up to three digits. */
        if (context_p->status_flags & PARSER_IS_STRICT)
        {
          parser_raise_error (context_p, PARSER_ERR_OCTAL_ESCAPE_NOT_ALLOWED);
        }

        source_p++;
        column++;

        if (source_p < source_end_p && *source_p >= LIT_CHAR_0 && *source_p <= LIT_CHAR_7)
        {
          source_p++;
          column++;

          if (source_p < source_end_p && *source_p >= LIT_CHAR_0 && *source_p <= LIT_CHAR_7)
          {
            /* Octal values >= 0200 (0x80) require
             * two bytes for encoding in UTF-8. */
            if (source_p[-2] >= LIT_CHAR_2)
            {
              length++;
            }

            source_p++;
            column++;
          }
        }

        length++;
        continue;
      }

      if (*source_p >= LIT_CHAR_4 && *source_p <= LIT_CHAR_7)
      {
        /* Octal escape starting with 4..7: up to two digits. */
        if (context_p->status_flags & PARSER_IS_STRICT)
        {
          parser_raise_error (context_p, PARSER_ERR_OCTAL_ESCAPE_NOT_ALLOWED);
        }

        source_p++;
        column++;

        if (source_p < source_end_p && *source_p >= LIT_CHAR_0 && *source_p <= LIT_CHAR_7)
        {
          source_p++;
          column++;
        }

        /* The maximum number is 0x4d so the UTF-8
         * representation is always one byte. */
        length++;
        continue;
      }

      if (*source_p == LIT_CHAR_LOWERCASE_X || *source_p == LIT_CHAR_LOWERCASE_U)
      {
        /* \xXX or \uXXXX escape: the decoded UTF-8 length is counted. */
        uint8_t hex_part_length = (*source_p == LIT_CHAR_LOWERCASE_X) ? 2 : 4;

        context_p->token.line = line;
        context_p->token.column = (parser_line_counter_t) (column - 1);
        if (source_p + 1 + hex_part_length > source_end_p)
        {
          parser_raise_error (context_p, PARSER_ERR_INVALID_ESCAPE_SEQUENCE);
        }

        length += lit_char_get_utf8_length (lexer_hex_to_character (context_p,
                                                                    source_p + 1,
                                                                    hex_part_length));
        source_p += hex_part_length + 1;
        PARSER_PLUS_EQUAL_LC (column, hex_part_length + 1u);
        continue;
      }
    }

    if (*source_p >= LEXER_UTF8_4BYTE_START)
    {
      /* Processing 4 byte unicode sequence (even if it is
       * after a backslash). Always converted to two 3 byte
       * long sequence. */
      length += 2 * 3;
      has_escape = true;
      source_p += 4;
      column++;
      continue;
    }
    else if (*source_p == LIT_CHAR_CR
             || *source_p == LIT_CHAR_LF
             || (*source_p == LEXER_NEWLINE_LS_PS_BYTE_1 && LEXER_NEWLINE_LS_PS_BYTE_23 (source_p)))
    {
      /* Unescaped line terminators are not allowed inside strings. */
      context_p->token.line = line;
      context_p->token.column = column;
      parser_raise_error (context_p, PARSER_ERR_NEWLINE_NOT_ALLOWED);
    }
    else if (*source_p == LIT_CHAR_TAB)
    {
      column = align_column_to_tab (column);
      /* Subtract one because column is increased below. */
      column--;
    }

    source_p++;
    column++;
    length++;

    /* Consume UTF-8 continuation bytes; they do not advance the column. */
    while (source_p < source_end_p
           && IS_UTF8_INTERMEDIATE_OCTET (*source_p))
    {
      source_p++;
      length++;
    }
  }

  if (length > PARSER_MAXIMUM_STRING_LENGTH)
  {
    parser_raise_error (context_p, PARSER_ERR_STRING_TOO_LONG);
  }

  context_p->token.type = LEXER_LITERAL;

  /* Fill literal data. */
  context_p->token.lit_location.char_p = string_start_p;
  context_p->token.lit_location.length = (uint16_t) length;
  context_p->token.lit_location.type = LEXER_STRING_LITERAL;
  context_p->token.lit_location.has_escape = has_escape;

  context_p->source_p = source_p + 1;
  context_p->line = line;
  context_p->column = (parser_line_counter_t) (column + 1);
} /* lexer_parse_string */
/**
 * Parse number.
 *
 * Scans a decimal, hexadecimal or legacy octal numeric literal and
 * fills context_p->token with its location, length and number kind
 * (token.extra_value).
 */
static void
lexer_parse_number (parser_context_t *context_p) /**< context */
{
  const uint8_t *source_p = context_p->source_p;
  const uint8_t *source_end_p = context_p->source_end_p;
  bool can_be_float = false;
  size_t length;

  context_p->token.type = LEXER_LITERAL;
  context_p->token.literal_is_reserved = false;
  context_p->token.extra_value = LEXER_NUMBER_DECIMAL;
  context_p->token.lit_location.char_p = source_p;
  context_p->token.lit_location.type = LEXER_NUMBER_LITERAL;
  context_p->token.lit_location.has_escape = false;

  if (source_p[0] == LIT_CHAR_0
      && source_p + 1 < source_end_p)
  {
    if (LEXER_TO_ASCII_LOWERCASE (source_p[1]) == LIT_CHAR_LOWERCASE_X)
    {
      /* Hexadecimal literal: "0x" must be followed by a hex digit. */
      context_p->token.extra_value = LEXER_NUMBER_HEXADECIMAL;
      source_p += 2;

      if (source_p >= source_end_p
          || !lit_char_is_hex_digit (source_p[0]))
      {
        parser_raise_error (context_p, PARSER_ERR_INVALID_HEX_DIGIT);
      }

      do
      {
        source_p++;
      }
      while (source_p < source_end_p
             && lit_char_is_hex_digit (source_p[0]));
    }
    else if (source_p[1] >= LIT_CHAR_0
             && source_p[1] <= LIT_CHAR_7)
    {
      /* Legacy octal literal: forbidden in strict mode. */
      context_p->token.extra_value = LEXER_NUMBER_OCTAL;

      if (context_p->status_flags & PARSER_IS_STRICT)
      {
        parser_raise_error (context_p, PARSER_ERR_OCTAL_NUMBER_NOT_ALLOWED);
      }

      do
      {
        source_p++;
      }
      while (source_p < source_end_p
             && source_p[0] >= LIT_CHAR_0
             && source_p[0] <= LIT_CHAR_7);

      /* Digits 8 and 9 are not allowed after octal digits. */
      if (source_p < source_end_p
          && source_p[0] >= LIT_CHAR_8
          && source_p[0] <= LIT_CHAR_9)
      {
        parser_raise_error (context_p, PARSER_ERR_INVALID_NUMBER);
      }
    }
    else if (source_p[1] >= LIT_CHAR_8
             && source_p[1] <= LIT_CHAR_9)
    {
      /* A leading zero followed by 8 or 9 is invalid. */
      parser_raise_error (context_p, PARSER_ERR_INVALID_NUMBER);
    }
    else
    {
      /* A single leading zero; may continue as a float. */
      can_be_float = true;
      source_p++;
    }
  }
  else
  {
    /* Decimal integer part. */
    do
    {
      source_p++;
    }
    while (source_p < source_end_p
           && source_p[0] >= LIT_CHAR_0
           && source_p[0] <= LIT_CHAR_9);

    can_be_float = true;
  }

  if (can_be_float)
  {
    /* Optional fraction part. */
    if (source_p < source_end_p
        && source_p[0] == LIT_CHAR_DOT)
    {
      source_p++;
      while (source_p < source_end_p
             && source_p[0] >= LIT_CHAR_0
             && source_p[0] <= LIT_CHAR_9)
      {
        source_p++;
      }
    }

    /* Optional exponent part: e or E, optional sign, then digits. */
    if (source_p < source_end_p
        && LEXER_TO_ASCII_LOWERCASE (source_p[0]) == LIT_CHAR_LOWERCASE_E)
    {
      source_p++;

      if (source_p < source_end_p
          && (source_p[0] == LIT_CHAR_PLUS || source_p[0] == LIT_CHAR_MINUS))
      {
        source_p++;
      }

      if (source_p >= source_end_p
          || source_p[0] < LIT_CHAR_0
          || source_p[0] > LIT_CHAR_9)
      {
        parser_raise_error (context_p, PARSER_ERR_MISSING_EXPONENT);
      }

      do
      {
        source_p++;
      }
      while (source_p < source_end_p
             && source_p[0] >= LIT_CHAR_0
             && source_p[0] <= LIT_CHAR_9);
    }
  }

  /* A number must not be directly followed by an identifier. */
  if (source_p < source_end_p
      && (lit_char_is_identifier_start (source_p) || source_p[0] == LIT_CHAR_BACKSLASH))
  {
    parser_raise_error (context_p, PARSER_ERR_IDENTIFIER_AFTER_NUMBER);
  }

  length = (size_t) (source_p - context_p->source_p);
  if (length > PARSER_MAXIMUM_IDENT_LENGTH)
  {
    parser_raise_error (context_p, PARSER_ERR_NUMBER_TOO_LONG);
  }

  context_p->token.lit_location.length = (uint16_t) length;
  PARSER_PLUS_EQUAL_LC (context_p->column, length);
  context_p->source_p = source_p;
} /* lexer_parse_number */
/* The LEXER_TYPE_x_TOKEN macros below expand to switch cases inside
 * lexer_next_token (); 'length' and 'context_p' are locals there. */

/**
 * Single character token.
 */
#define LEXER_TYPE_A_TOKEN(char1, type1) \
  case (uint8_t) (char1) : \
  { \
    context_p->token.type = (type1); \
    length = 1; \
    break; \
  }

/**
 * One character token optionally followed by a second character
 * (e.g. "*" and "*="). */
#define LEXER_TYPE_B_TOKEN(char1, type1, char2, type2) \
  case (uint8_t) (char1) : \
  { \
    if (length >= 2 && context_p->source_p[1] == (uint8_t) (char2)) \
    { \
      context_p->token.type = (type2); \
      length = 2; \
      break; \
    } \
    \
    context_p->token.type = (type1); \
    length = 1; \
    break; \
  }

/**
 * One character token with two possible second characters
 * (e.g. "+", "+=" and "++"). */
#define LEXER_TYPE_C_TOKEN(char1, type1, char2, type2, char3, type3) \
  case (uint8_t) (char1) : \
  { \
    if (length >= 2) \
    { \
      if (context_p->source_p[1] == (uint8_t) (char2)) \
      { \
        context_p->token.type = (type2); \
        length = 2; \
        break; \
      } \
      \
      if (context_p->source_p[1] == (uint8_t) (char3)) \
      { \
        context_p->token.type = (type3); \
        length = 2; \
        break; \
      } \
    } \
    \
    context_p->token.type = (type1); \
    length = 1; \
    break; \
  }

/**
 * One, two or three character token where each longer form extends the
 * previous one (e.g. "=", "==" and "==="). */
#define LEXER_TYPE_D_TOKEN(char1, type1, char2, type2, char3, type3) \
  case (uint8_t) (char1) : \
  { \
    if (length >= 2 && context_p->source_p[1] == (uint8_t) (char2)) \
    { \
      if (length >= 3 && context_p->source_p[2] == (uint8_t) (char3)) \
      { \
        context_p->token.type = (type3); \
        length = 3; \
        break; \
      } \
      \
      context_p->token.type = (type2); \
      length = 2; \
      break; \
    } \
    \
    context_p->token.type = (type1); \
    length = 1; \
    break; \
  }
/**
 * Get next token.
 *
 * Skips leading whitespace/comments and fills context_p->token with
 * the next token: identifier/keyword, number, string, punctuator or
 * LEXER_EOS at the end of the source.
 */
void
lexer_next_token (parser_context_t *context_p) /**< context */
{
  size_t length;

  skip_spaces (context_p);

  context_p->token.line = context_p->line;
  context_p->token.column = context_p->column;

  /* Number of bytes left in the source buffer. */
  length = (size_t) (context_p->source_end_p - context_p->source_p);
  if (length == 0)
  {
    context_p->token.type = LEXER_EOS;
    return;
  }

  /* Identifiers and keywords (a backslash starts a \u escape). */
  if (lit_char_is_identifier_start (context_p->source_p)
      || context_p->source_p[0] == LIT_CHAR_BACKSLASH)
  {
    lexer_parse_identifier (context_p, true);
    return;
  }

  /* Numeric literals. */
  if (context_p->source_p[0] >= LIT_CHAR_0 && context_p->source_p[0] <= LIT_CHAR_9)
  {
    lexer_parse_number (context_p);
    return;
  }

  /* Punctuators. */
  switch (context_p->source_p[0])
  {
    LEXER_TYPE_A_TOKEN (LIT_CHAR_LEFT_BRACE, LEXER_LEFT_BRACE);
    LEXER_TYPE_A_TOKEN (LIT_CHAR_LEFT_PAREN, LEXER_LEFT_PAREN);
    LEXER_TYPE_A_TOKEN (LIT_CHAR_LEFT_SQUARE, LEXER_LEFT_SQUARE);
    LEXER_TYPE_A_TOKEN (LIT_CHAR_RIGHT_BRACE, LEXER_RIGHT_BRACE);
    LEXER_TYPE_A_TOKEN (LIT_CHAR_RIGHT_PAREN, LEXER_RIGHT_PAREN);
    LEXER_TYPE_A_TOKEN (LIT_CHAR_RIGHT_SQUARE, LEXER_RIGHT_SQUARE);
    LEXER_TYPE_A_TOKEN (LIT_CHAR_SEMICOLON, LEXER_SEMICOLON);
    LEXER_TYPE_A_TOKEN (LIT_CHAR_COMMA, LEXER_COMMA);

    case (uint8_t) LIT_CHAR_DOT :
    {
      /* A dot followed by a digit starts a fractional number. */
      if (length >= 2
          && (context_p->source_p[1] >= LIT_CHAR_0 && context_p->source_p[1] <= LIT_CHAR_9))
      {
        lexer_parse_number (context_p);
        return;
      }

      context_p->token.type = LEXER_DOT;
      length = 1;
      break;
    }

    case (uint8_t) LIT_CHAR_LESS_THAN:
    {
      /* Candidates: "<", "<=", "<<", "<<=". */
      if (length >= 2)
      {
        if (context_p->source_p[1] == (uint8_t) LIT_CHAR_EQUALS)
        {
          context_p->token.type = LEXER_LESS_EQUAL;
          length = 2;
          break;
        }

        if (context_p->source_p[1] == (uint8_t) LIT_CHAR_LESS_THAN)
        {
          if (length >= 3 && context_p->source_p[2] == (uint8_t) LIT_CHAR_EQUALS)
          {
            context_p->token.type = LEXER_ASSIGN_LEFT_SHIFT;
            length = 3;
            break;
          }

          context_p->token.type = LEXER_LEFT_SHIFT;
          length = 2;
          break;
        }
      }

      context_p->token.type = LEXER_LESS;
      length = 1;
      break;
    }

    case LIT_CHAR_GREATER_THAN:
    {
      /* Candidates: ">", ">=", ">>", ">>=", ">>>", ">>>=". */
      if (length >= 2)
      {
        if (context_p->source_p[1] == (uint8_t) LIT_CHAR_EQUALS)
        {
          context_p->token.type = LEXER_GREATER_EQUAL;
          length = 2;
          break;
        }

        if (context_p->source_p[1] == (uint8_t) LIT_CHAR_GREATER_THAN)
        {
          if (length >= 3)
          {
            if (context_p->source_p[2] == (uint8_t) LIT_CHAR_EQUALS)
            {
              context_p->token.type = LEXER_ASSIGN_RIGHT_SHIFT;
              length = 3;
              break;
            }

            if (context_p->source_p[2] == (uint8_t) LIT_CHAR_GREATER_THAN)
            {
              if (length >= 4 && context_p->source_p[3] == (uint8_t) LIT_CHAR_EQUALS)
              {
                context_p->token.type = LEXER_ASSIGN_UNS_RIGHT_SHIFT;
                length = 4;
                break;
              }

              context_p->token.type = LEXER_UNS_RIGHT_SHIFT;
              length = 3;
              break;
            }
          }

          context_p->token.type = LEXER_RIGHT_SHIFT;
          length = 2;
          break;
        }
      }

      context_p->token.type = LEXER_GREATER;
      length = 1;
      break;
    }

    LEXER_TYPE_D_TOKEN (LIT_CHAR_EQUALS, LEXER_ASSIGN, LIT_CHAR_EQUALS,
                        LEXER_EQUAL, LIT_CHAR_EQUALS, LEXER_STRICT_EQUAL)
    LEXER_TYPE_D_TOKEN (LIT_CHAR_EXCLAMATION, LEXER_LOGICAL_NOT, LIT_CHAR_EQUALS,
                        LEXER_NOT_EQUAL, LIT_CHAR_EQUALS, LEXER_STRICT_NOT_EQUAL)

    LEXER_TYPE_C_TOKEN (LIT_CHAR_PLUS, LEXER_ADD, LIT_CHAR_EQUALS,
                        LEXER_ASSIGN_ADD, LIT_CHAR_PLUS, LEXER_INCREASE)
    LEXER_TYPE_C_TOKEN (LIT_CHAR_MINUS, LEXER_SUBTRACT, LIT_CHAR_EQUALS,
                        LEXER_ASSIGN_SUBTRACT, LIT_CHAR_MINUS, LEXER_DECREASE)

    LEXER_TYPE_B_TOKEN (LIT_CHAR_ASTERISK, LEXER_MULTIPLY, LIT_CHAR_EQUALS,
                        LEXER_ASSIGN_MULTIPLY)
    LEXER_TYPE_B_TOKEN (LIT_CHAR_SLASH, LEXER_DIVIDE, LIT_CHAR_EQUALS,
                        LEXER_ASSIGN_DIVIDE)
    LEXER_TYPE_B_TOKEN (LIT_CHAR_PERCENT, LEXER_MODULO, LIT_CHAR_EQUALS,
                        LEXER_ASSIGN_MODULO)

    LEXER_TYPE_C_TOKEN (LIT_CHAR_AMPERSAND, LEXER_BIT_AND, LIT_CHAR_EQUALS,
                        LEXER_ASSIGN_BIT_AND, LIT_CHAR_AMPERSAND, LEXER_LOGICAL_AND)
    LEXER_TYPE_C_TOKEN (LIT_CHAR_VLINE, LEXER_BIT_OR, LIT_CHAR_EQUALS,
                        LEXER_ASSIGN_BIT_OR, LIT_CHAR_VLINE, LEXER_LOGICAL_OR)
    LEXER_TYPE_B_TOKEN (LIT_CHAR_CIRCUMFLEX, LEXER_BIT_XOR, LIT_CHAR_EQUALS,
                        LEXER_ASSIGN_BIT_XOR)

    LEXER_TYPE_A_TOKEN (LIT_CHAR_TILDE, LEXER_BIT_NOT);
    LEXER_TYPE_A_TOKEN (LIT_CHAR_QUESTION, LEXER_QUESTION_MARK);
    LEXER_TYPE_A_TOKEN (LIT_CHAR_COLON, LEXER_COLON);

    case LIT_CHAR_SINGLE_QUOTE:
    case LIT_CHAR_DOUBLE_QUOTE:
    {
      lexer_parse_string (context_p);
      return;
    }

    default:
    {
      parser_raise_error (context_p, PARSER_ERR_INVALID_CHARACTER);
    }
  }

  context_p->source_p += length;
  PARSER_PLUS_EQUAL_LC (context_p->column, length);
} /* lexer_next_token */

#undef LEXER_TYPE_A_TOKEN
#undef LEXER_TYPE_B_TOKEN
#undef LEXER_TYPE_C_TOKEN
#undef LEXER_TYPE_D_TOKEN
/**
 * Search or append the string to the literal pool.
 *
 * On return context_p->lit_object refers to the (found or newly
 * created) literal and its pool index.
 */
static void
lexer_process_char_literal (parser_context_t *context_p, /**< context */
                            const uint8_t *char_p, /**< characters */
                            size_t length, /**< length of string */
                            uint8_t literal_type, /**< final literal type */
                            bool has_escape) /**< has escape sequences */
{
  parser_list_iterator_t iterator;
  lexer_literal_t *lit_p;
  uint32_t index = 0;

  JERRY_ASSERT (literal_type == LEXER_IDENT_LITERAL
                || literal_type == LEXER_STRING_LITERAL);

  JERRY_ASSERT (literal_type != LEXER_IDENT_LITERAL || length <= PARSER_MAXIMUM_IDENT_LENGTH);
  JERRY_ASSERT (literal_type != LEXER_STRING_LITERAL || length <= PARSER_MAXIMUM_STRING_LENGTH);

  /* First look for an existing literal with the same type and bytes. */
  parser_list_iterator_init (&context_p->literal_pool, &iterator);

  for (lit_p = (lexer_literal_t *) parser_list_iterator_next (&iterator);
       lit_p != NULL;
       lit_p = (lexer_literal_t *) parser_list_iterator_next (&iterator))
  {
    if (lit_p->type == literal_type
        && lit_p->prop.length == length
        && memcmp (lit_p->u.char_p, char_p, length) == 0)
    {
      context_p->lit_object.literal_p = lit_p;
      context_p->lit_object.index = (uint16_t) index;
      lit_p->status_flags = (uint8_t) (lit_p->status_flags & ~LEXER_FLAG_UNUSED_IDENT);
      return;
    }

    index++;
  }

  /* Not found: append a new literal at the end of the pool. */
  JERRY_ASSERT (index == context_p->literal_count);

  if (index >= PARSER_MAXIMUM_NUMBER_OF_LITERALS)
  {
    parser_raise_error (context_p, PARSER_ERR_LITERAL_LIMIT_REACHED);
  }

  lit_p = (lexer_literal_t *) parser_list_append (context_p, &context_p->literal_pool);
  lit_p->prop.length = (uint16_t) length;
  lit_p->type = literal_type;
  lit_p->status_flags = has_escape ? 0 : LEXER_FLAG_SOURCE_PTR;

  if (has_escape)
  {
    /* Escaped literals are passed in a temporary buffer (see
     * lexer_construct_literal_object ()), so the bytes are copied
     * into a heap block owned by the literal. */
    uint8_t *copy_p = (uint8_t *) jmem_heap_alloc_block (length);
    memcpy (copy_p, char_p, length);
    lit_p->u.char_p = copy_p;
  }
  else
  {
    /* Without escapes the literal can point into the source buffer. */
    lit_p->u.char_p = char_p;
  }

  context_p->lit_object.literal_p = lit_p;
  context_p->lit_object.index = (uint16_t) index;
  context_p->literal_count++;
} /* lexer_process_char_literal */
/* Maximum buffer size for identifiers which contains escape sequences. */
#define LEXER_MAX_LITERAL_LOCAL_BUFFER_SIZE 48

/**
 * Construct a literal object from an identifier.
 *
 * Resolves the escape sequences of the literal (into a stack or heap
 * buffer) if necessary, then inserts it into the literal pool via
 * lexer_process_char_literal (). Also detects the special "eval" and
 * "arguments" identifiers.
 */
void
lexer_construct_literal_object (parser_context_t *context_p, /**< context */
                                lexer_lit_location_t *literal_p, /**< literal location */
                                uint8_t literal_type) /**< final literal type */
{
  uint8_t *destination_start_p;
  const uint8_t *source_p;
  uint8_t local_byte_array[LEXER_MAX_LITERAL_LOCAL_BUFFER_SIZE];

  JERRY_ASSERT (literal_p->type == LEXER_IDENT_LITERAL
                || literal_p->type == LEXER_STRING_LITERAL);

  JERRY_ASSERT (context_p->allocated_buffer_p == NULL);

  destination_start_p = local_byte_array;
  source_p = literal_p->char_p;

  if (literal_p->has_escape)
  {
    uint8_t *destination_p;

    /* Fall back to a heap allocation when the decoded literal does not
     * fit into the on-stack buffer; the pointer is recorded so it can
     * be freed even when a parser error is raised. */
    if (literal_p->length > LEXER_MAX_LITERAL_LOCAL_BUFFER_SIZE)
    {
      destination_start_p = (uint8_t *) parser_malloc_local (context_p, literal_p->length);
      context_p->allocated_buffer_p = destination_start_p;
      context_p->allocated_buffer_size = literal_p->length;
    }

    destination_p = destination_start_p;

    if (literal_p->type == LEXER_IDENT_LITERAL)
    {
      const uint8_t *source_end_p = context_p->source_end_p;

      JERRY_ASSERT (literal_p->length <= PARSER_MAXIMUM_IDENT_LENGTH);

      do
      {
        if (*source_p == LIT_CHAR_BACKSLASH)
        {
          /* Decode a \uXXXX escape sequence. */
          destination_p += lit_char_to_utf8_bytes (destination_p,
                                                   lexer_hex_to_character (context_p, source_p + 2, 4));
          source_p += 6;
          continue;
        }

        /* Copy the character including its continuation bytes. */
        *destination_p++ = *source_p++;

        while (source_p < source_end_p
               && IS_UTF8_INTERMEDIATE_OCTET (*source_p))
        {
          *destination_p++ = *source_p++;
        }
      }
      while (source_p < source_end_p
             && (lit_char_is_identifier_part (source_p) || *source_p == LIT_CHAR_BACKSLASH));

      JERRY_ASSERT (destination_p == destination_start_p + literal_p->length);
    }
    else
    {
      /* The byte before the literal is its opening quote. */
      uint8_t str_end_character = source_p[-1];

      while (true)
      {
        if (*source_p == str_end_character)
        {
          break;
        }

        if (*source_p == LIT_CHAR_BACKSLASH)
        {
          uint8_t conv_character;

          source_p++;
          JERRY_ASSERT (source_p < context_p->source_end_p);

          /* Newline is ignored. */
          if (*source_p == LIT_CHAR_CR
              || *source_p == LIT_CHAR_LF
              || (*source_p == LEXER_NEWLINE_LS_PS_BYTE_1 && LEXER_NEWLINE_LS_PS_BYTE_23 (source_p)))
          {
            if (*source_p == LIT_CHAR_CR)
            {
              source_p++;
              JERRY_ASSERT (source_p < context_p->source_end_p);
              if (*source_p == LIT_CHAR_LF)
              {
                source_p++;
              }
            }
            else if (*source_p == LIT_CHAR_LF)
            {
              source_p++;
            }
            else
            {
              source_p += 3;
            }
            continue;
          }

          /* Octal escape starting with 0..3: up to three digits. */
          if (*source_p >= LIT_CHAR_0 && *source_p <= LIT_CHAR_3)
          {
            uint32_t octal_number = (uint32_t) (*source_p - LIT_CHAR_0);

            source_p++;
            JERRY_ASSERT (source_p < context_p->source_end_p);

            if (*source_p >= LIT_CHAR_0 && *source_p <= LIT_CHAR_7)
            {
              octal_number = octal_number * 8 + (uint32_t) (*source_p - LIT_CHAR_0);
              source_p++;
              JERRY_ASSERT (source_p < context_p->source_end_p);

              if (*source_p >= LIT_CHAR_0 && *source_p <= LIT_CHAR_7)
              {
                octal_number = octal_number * 8 + (uint32_t) (*source_p - LIT_CHAR_0);
                source_p++;
                JERRY_ASSERT (source_p < context_p->source_end_p);
              }
            }

            destination_p += lit_char_to_utf8_bytes (destination_p, (uint16_t) octal_number);
            continue;
          }

          /* Octal escape starting with 4..7: up to two digits, so the
           * result always fits into one byte. */
          if (*source_p >= LIT_CHAR_4 && *source_p <= LIT_CHAR_7)
          {
            uint32_t octal_number = (uint32_t) (*source_p - LIT_CHAR_0);

            source_p++;
            JERRY_ASSERT (source_p < context_p->source_end_p);

            if (*source_p >= LIT_CHAR_0 && *source_p <= LIT_CHAR_7)
            {
              octal_number = octal_number * 8 + (uint32_t) (*source_p - LIT_CHAR_0);
              source_p++;
              JERRY_ASSERT (source_p < context_p->source_end_p);
            }

            *destination_p++ = (uint8_t) octal_number;
            continue;
          }

          /* Hexadecimal escapes: \xXX or \uXXXX. */
          if (*source_p == LIT_CHAR_LOWERCASE_X || *source_p == LIT_CHAR_LOWERCASE_U)
          {
            int hex_part_length = (*source_p == LIT_CHAR_LOWERCASE_X) ? 2 : 4;

            JERRY_ASSERT (source_p + 1 + hex_part_length <= context_p->source_end_p);

            destination_p += lit_char_to_utf8_bytes (destination_p,
                                                     lexer_hex_to_character (context_p,
                                                                             source_p + 1,
                                                                             hex_part_length));
            source_p += hex_part_length + 1;
            continue;
          }

          /* Single character escapes: \b \t \n \v \f \r. */
          conv_character = *source_p;
          switch (*source_p)
          {
            case LIT_CHAR_LOWERCASE_B:
            {
              conv_character = 0x08;
              break;
            }
            case LIT_CHAR_LOWERCASE_T:
            {
              conv_character = 0x09;
              break;
            }
            case LIT_CHAR_LOWERCASE_N:
            {
              conv_character = 0x0a;
              break;
            }
            case LIT_CHAR_LOWERCASE_V:
            {
              conv_character = 0x0b;
              break;
            }
            case LIT_CHAR_LOWERCASE_F:
            {
              conv_character = 0x0c;
              break;
            }
            case LIT_CHAR_LOWERCASE_R:
            {
              conv_character = 0x0d;
              break;
            }
          }

          if (conv_character != *source_p)
          {
            *destination_p++ = conv_character;
            source_p++;
            continue;
          }
          /* Any other escaped character is copied verbatim below. */
        }

        if (*source_p >= LEXER_UTF8_4BYTE_START)
        {
          /* Processing 4 byte unicode sequence (even if it is
           * after a backslash). Always converted to two 3 byte
           * long sequence. */
          uint32_t character = ((((uint32_t) source_p[0]) & 0x7) << 18);

          character |= ((((uint32_t) source_p[1]) & LIT_UTF8_LAST_6_BITS_MASK) << 12);
          character |= ((((uint32_t) source_p[2]) & LIT_UTF8_LAST_6_BITS_MASK) << 6);
          character |= (((uint32_t) source_p[3]) & LIT_UTF8_LAST_6_BITS_MASK);

          JERRY_ASSERT (character >= 0x10000);
          character -= 0x10000;

          /* Encode the code point as a surrogate pair. */
          destination_p += lit_char_to_utf8_bytes (destination_p,
                                                   (ecma_char_t) (0xd800 | (character >> 10)));
          destination_p += lit_char_to_utf8_bytes (destination_p,
                                                   (ecma_char_t) (0xdc00 | (character & LIT_UTF16_LAST_10_BITS_MASK)));
          source_p += 4;
          continue;
        }

        *destination_p++ = *source_p++;

        /* There is no need to check the source_end_p
         * since the string is terminated by a quotation mark. */
        while (IS_UTF8_INTERMEDIATE_OCTET (*source_p))
        {
          *destination_p++ = *source_p++;
        }
      }

      JERRY_ASSERT (destination_p == destination_start_p + literal_p->length);
    }

    source_p = destination_start_p;
  }

  lexer_process_char_literal (context_p,
                              source_p,
                              literal_p->length,
                              literal_type,
                              literal_p->has_escape);

  context_p->lit_object.type = LEXER_LITERAL_OBJECT_ANY;

  if (literal_type == LEXER_IDENT_LITERAL)
  {
    /* Identifiers inside a with statement must not be register stored. */
    if ((context_p->status_flags & PARSER_INSIDE_WITH)
        && context_p->lit_object.literal_p->type == LEXER_IDENT_LITERAL)
    {
      context_p->lit_object.literal_p->status_flags |= LEXER_FLAG_NO_REG_STORE;
    }

    /* Detect the "eval" identifier. */
    if (literal_p->length == 4
        && source_p[0] == LIT_CHAR_LOWERCASE_E
        && source_p[3] == LIT_CHAR_LOWERCASE_L
        && source_p[1] == LIT_CHAR_LOWERCASE_V
        && source_p[2] == LIT_CHAR_LOWERCASE_A)
    {
      context_p->lit_object.type = LEXER_LITERAL_OBJECT_EVAL;
    }

    /* Detect the "arguments" identifier. */
    if (literal_p->length == 9
        && source_p[0] == LIT_CHAR_LOWERCASE_A
        && source_p[8] == LIT_CHAR_LOWERCASE_S
        && memcmp (source_p + 1, "rgument", 7) == 0)
    {
      context_p->lit_object.type = LEXER_LITERAL_OBJECT_ARGUMENTS;

      if (!(context_p->status_flags & PARSER_ARGUMENTS_NOT_NEEDED))
      {
        context_p->status_flags |= PARSER_ARGUMENTS_NEEDED | PARSER_LEXICAL_ENV_NEEDED;
        context_p->lit_object.literal_p->status_flags |= LEXER_FLAG_NO_REG_STORE;
      }
    }
  }

  /* Release the temporary heap buffer (if any). */
  if (destination_start_p != local_byte_array)
  {
    JERRY_ASSERT (context_p->allocated_buffer_p == destination_start_p);

    context_p->allocated_buffer_p = NULL;
    parser_free_local (destination_start_p,
                       context_p->allocated_buffer_size);
  }

  JERRY_ASSERT (context_p->allocated_buffer_p == NULL);
} /* lexer_construct_literal_object */

#undef LEXER_MAX_LITERAL_LOCAL_BUFFER_SIZE
/**
 * Construct a number object.
 *
 * Decimal/hex forms are converted by ecma_utf8_string_to_number; the legacy
 * octal form (token.extra_value == LEXER_NUMBER_OCTAL) is folded manually
 * digit-by-digit. Small non-negative integers can be encoded directly into
 * the byte code (push-number), in which case no literal is created.
 *
 * @return true if number is small number
 */
bool
lexer_construct_number_object (parser_context_t *context_p, /**< context */
                               bool push_number_allowed, /**< push number support is allowed */
                               bool is_negative_number) /**< sign is negative */
{
  parser_list_iterator_t literal_iterator;
  lexer_literal_t *literal_p;
  ecma_number_t num;
  uint32_t literal_index = 0;
  uint16_t length = context_p->token.lit_location.length;

  if (context_p->token.extra_value != LEXER_NUMBER_OCTAL)
  {
    num = ecma_utf8_string_to_number (context_p->token.lit_location.char_p,
                                      length);
  }
  else
  {
    const uint8_t *src_p = context_p->token.lit_location.char_p;
    const uint8_t *src_end_p = src_p + length - 1;

    num = 0;
    do
    {
      /* Pre-increment skips the leading '0' of the octal literal. */
      src_p++;
      num = num * 8 + (ecma_number_t) (*src_p - LIT_CHAR_0);
    }
    while (src_p < src_end_p);
  }

  if (push_number_allowed)
  {
    int32_t int_num = (int32_t) num;

    /* Integral values in the push-number byte range (except -0, which
     * cannot be represented as a small integer) avoid a literal. */
    if (int_num == num)
    {
      if (int_num <= CBC_PUSH_NUMBER_BYTE_RANGE_END
          && (int_num != 0 || !is_negative_number))
      {
        context_p->lit_object.index = (uint16_t) int_num;
        return true;
      }
    }
  }

  if (is_negative_number)
  {
    num = -num;
  }

  jmem_cpointer_t lit_cp = ecma_find_or_create_literal_number (num);

  /* Reuse an existing number literal holding the same value, if any. */
  parser_list_iterator_init (&context_p->literal_pool, &literal_iterator);
  while ((literal_p = (lexer_literal_t *) parser_list_iterator_next (&literal_iterator)) != NULL)
  {
    if (literal_p->type == LEXER_NUMBER_LITERAL
        && literal_p->u.value == lit_cp)
    {
      context_p->lit_object.literal_p = literal_p;
      context_p->lit_object.index = (uint16_t) literal_index;
      context_p->lit_object.type = LEXER_LITERAL_OBJECT_ANY;
      return false;
    }
    literal_index++;
  }

  JERRY_ASSERT (literal_index == context_p->literal_count);

  if (literal_index >= PARSER_MAXIMUM_NUMBER_OF_LITERALS)
  {
    parser_raise_error (context_p, PARSER_ERR_LITERAL_LIMIT_REACHED);
  }

  literal_p = (lexer_literal_t *) parser_list_append (context_p, &context_p->literal_pool);
  literal_p->prop.length = context_p->token.lit_location.length;
  /* Marked unused first; only flipped to a real type once fully set up. */
  literal_p->type = LEXER_UNUSED_LITERAL;
  literal_p->status_flags = 0;

  context_p->literal_count++;

  literal_p->u.value = lit_cp;
  literal_p->type = LEXER_NUMBER_LITERAL;

  context_p->lit_object.literal_p = literal_p;
  context_p->lit_object.index = (uint16_t) literal_index;
  context_p->lit_object.type = LEXER_LITERAL_OBJECT_ANY;
  return false;
} /* lexer_construct_number_object */
/**
 * Construct a function literal object.
 *
 * A placeholder literal is appended and the literal counter is bumped
 * *before* the nested function is parsed, because parsing the body may
 * append further literals of its own; the placeholder is completed
 * afterwards with the compiled byte code.
 *
 * @return function object literal index
 */
uint16_t
lexer_construct_function_object (parser_context_t *context_p, /**< context */
                                 uint32_t extra_status_flags) /**< extra status flags */
{
  ecma_compiled_code_t *compiled_code_p;
  lexer_literal_t *literal_p;
  uint16_t result_index;

  if (context_p->literal_count >= PARSER_MAXIMUM_NUMBER_OF_LITERALS)
  {
    parser_raise_error (context_p, PARSER_ERR_LITERAL_LIMIT_REACHED);
  }

  /* Propagate the this-resolution requirement into the nested function. */
  if (context_p->status_flags & PARSER_RESOLVE_THIS_FOR_CALLS)
  {
    extra_status_flags |= PARSER_RESOLVE_THIS_FOR_CALLS;
  }

  literal_p = (lexer_literal_t *) parser_list_append (context_p, &context_p->literal_pool);
  literal_p->type = LEXER_UNUSED_LITERAL;
  literal_p->status_flags = 0;

  result_index = context_p->literal_count;
  context_p->literal_count++;

  compiled_code_p = parser_parse_function (context_p, extra_status_flags);
  literal_p->u.bytecode_p = compiled_code_p;
  literal_p->type = LEXER_FUNCTION_LITERAL;

  return result_index;
} /* lexer_construct_function_object */
/**
 * Construct a regular expression object.
 *
 * Scans the regexp body (honoring character classes and escapes) and its
 * trailing flags, then — unless parse_only is set — compiles the pattern
 * and stores the regexp byte code in a newly appended literal.
 *
 * Fix: the parser position (source_p/column) was stored into the context
 * twice; the redundant second pair of assignments has been removed.
 */
void
lexer_construct_regexp_object (parser_context_t *context_p, /**< context */
                               bool parse_only) /**< parse only */
{
#ifndef CONFIG_DISABLE_REGEXP_BUILTIN
  const uint8_t *source_p = context_p->source_p;
  const uint8_t *regex_start_p = context_p->source_p;
  const uint8_t *regex_end_p = regex_start_p;
  const uint8_t *source_end_p = context_p->source_end_p;
  parser_line_counter_t column = context_p->column;
  lexer_literal_t *literal_p;
  bool in_class = false;
  uint16_t current_flags;
  lit_utf8_size_t length;

  JERRY_ASSERT (context_p->token.type == LEXER_DIVIDE
                || context_p->token.type == LEXER_ASSIGN_DIVIDE);

  if (context_p->token.type == LEXER_ASSIGN_DIVIDE)
  {
    /* The '=' of a '/=' token is actually part of the pattern. */
    regex_start_p--;
  }

  while (true)
  {
    if (source_p >= source_end_p)
    {
      parser_raise_error (context_p, PARSER_ERR_UNTERMINATED_REGEXP);
    }

    /* A '/' inside a character class does not terminate the pattern. */
    if (!in_class && source_p[0] == LIT_CHAR_SLASH)
    {
      regex_end_p = source_p;
      source_p++;
      column++;
      break;
    }

    switch (source_p[0])
    {
      case LIT_CHAR_CR:
      case LIT_CHAR_LF:
      case LEXER_NEWLINE_LS_PS_BYTE_1:
      {
        if (source_p[0] != LEXER_NEWLINE_LS_PS_BYTE_1
            || LEXER_NEWLINE_LS_PS_BYTE_23 (source_p))
        {
          parser_raise_error (context_p, PARSER_ERR_NEWLINE_NOT_ALLOWED);
        }
        break;
      }
      case LIT_CHAR_TAB:
      {
        column = align_column_to_tab (column);
        /* Subtract -1 because column is increased below. */
        column--;
        break;
      }
      case LIT_CHAR_LEFT_SQUARE:
      {
        in_class = true;
        break;
      }
      case LIT_CHAR_RIGHT_SQUARE:
      {
        in_class = false;
        break;
      }
      case LIT_CHAR_BACKSLASH:
      {
        if (source_p + 1 >= source_end_p)
        {
          parser_raise_error (context_p, PARSER_ERR_UNTERMINATED_REGEXP);
        }

        /* Skip the escaped character (single-byte printables only;
         * multibyte continuation bytes are consumed below). */
        if (source_p[1] >= 0x20 && source_p[1] <= LIT_UTF8_1_BYTE_CODE_POINT_MAX)
        {
          source_p++;
          column++;
        }
      }
    }

    source_p++;
    column++;

    /* Consume the remaining bytes of a multibyte UTF-8 character. */
    while (source_p < source_end_p
           && IS_UTF8_INTERMEDIATE_OCTET (source_p[0]))
    {
      source_p++;
    }
  }

  /* Parse the flags, rejecting duplicates. */
  current_flags = 0;
  while (source_p < source_end_p)
  {
    uint32_t flag = 0;

    if (source_p[0] == LIT_CHAR_LOWERCASE_G)
    {
      flag = RE_FLAG_GLOBAL;
    }
    else if (source_p[0] == LIT_CHAR_LOWERCASE_I)
    {
      flag = RE_FLAG_IGNORE_CASE;
    }
    else if (source_p[0] == LIT_CHAR_LOWERCASE_M)
    {
      flag = RE_FLAG_MULTILINE;
    }

    if (flag == 0)
    {
      break;
    }

    if (current_flags & flag)
    {
      parser_raise_error (context_p, PARSER_ERR_DUPLICATED_REGEXP_FLAG);
    }

    current_flags = (uint16_t) (current_flags | flag);
    source_p++;
    column++;
  }

  if (source_p < source_end_p
      && lit_char_is_identifier_part (source_p))
  {
    parser_raise_error (context_p, PARSER_ERR_UNKNOWN_REGEXP_FLAG);
  }

  /* Commit the parser position (previously done twice). */
  context_p->source_p = source_p;
  context_p->column = column;

  length = (lit_utf8_size_t) (regex_end_p - regex_start_p);
  if (length > PARSER_MAXIMUM_STRING_LENGTH)
  {
    parser_raise_error (context_p, PARSER_ERR_REGEXP_TOO_LONG);
  }

  if (parse_only)
  {
    return;
  }

  if (context_p->literal_count >= PARSER_MAXIMUM_NUMBER_OF_LITERALS)
  {
    parser_raise_error (context_p, PARSER_ERR_LITERAL_LIMIT_REACHED);
  }

  literal_p = (lexer_literal_t *) parser_list_append (context_p, &context_p->literal_pool);
  literal_p->prop.length = (uint16_t) length;
  literal_p->type = LEXER_UNUSED_LITERAL;
  literal_p->status_flags = 0;

  context_p->literal_count++;

  /* Compile the RegExp literal and store the RegExp bytecode pointer */
  const re_compiled_code_t *re_bytecode_p = NULL;
  ecma_value_t completion_value;

  ecma_string_t *pattern_str_p = ecma_new_ecma_string_from_utf8 (regex_start_p, length);

  completion_value = re_compile_bytecode (&re_bytecode_p,
                                          pattern_str_p,
                                          current_flags);
  ecma_deref_ecma_string (pattern_str_p);

  bool is_throw = ECMA_IS_VALUE_ERROR (completion_value);
  ecma_free_value (completion_value);

  if (is_throw)
  {
    parser_raise_error (context_p, PARSER_ERR_INVALID_REGEXP);
  }

  literal_p->type = LEXER_REGEXP_LITERAL;
  literal_p->u.bytecode_p = (ecma_compiled_code_t *) re_bytecode_p;

  context_p->token.type = LEXER_LITERAL;
  context_p->token.literal_is_reserved = false;
  context_p->token.lit_location.type = LEXER_REGEXP_LITERAL;

  context_p->lit_object.literal_p = literal_p;
  context_p->lit_object.index = (uint16_t) (context_p->literal_count - 1);
  context_p->lit_object.type = LEXER_LITERAL_OBJECT_ANY;
#else /* CONFIG_DISABLE_REGEXP_BUILTIN */
  JERRY_UNUSED (parse_only);
  parser_raise_error (context_p, PARSER_ERR_UNSUPPORTED_REGEXP);
#endif /* !CONFIG_DISABLE_REGEXP_BUILTIN */
} /* lexer_construct_regexp_object */
/**
 * Next token must be an identifier.
 *
 * In strict mode the identifier must not be "eval" or "arguments".
 */
void
lexer_expect_identifier (parser_context_t *context_p, /**< context */
                         uint8_t literal_type) /**< literal type */
{
  JERRY_ASSERT (literal_type == LEXER_STRING_LITERAL
                || literal_type == LEXER_IDENT_LITERAL);

  skip_spaces (context_p);
  context_p->token.line = context_p->line;
  context_p->token.column = context_p->column;

  /* Guard clause: anything that cannot start an identifier is an error. */
  if (context_p->source_p >= context_p->source_end_p
      || (!lit_char_is_identifier_start (context_p->source_p)
          && context_p->source_p[0] != LIT_CHAR_BACKSLASH))
  {
    parser_raise_error (context_p, PARSER_ERR_IDENTIFIER_EXPECTED);
  }

  lexer_parse_identifier (context_p, literal_type != LEXER_STRING_LITERAL);

  if (context_p->token.type != LEXER_LITERAL)
  {
    parser_raise_error (context_p, PARSER_ERR_IDENTIFIER_EXPECTED);
  }

  lexer_construct_literal_object (context_p,
                                  &context_p->token.lit_location,
                                  literal_type);

  if (literal_type == LEXER_IDENT_LITERAL
      && (context_p->status_flags & PARSER_IS_STRICT)
      && context_p->lit_object.type != LEXER_LITERAL_OBJECT_ANY)
  {
    JERRY_ASSERT (context_p->lit_object.type == LEXER_LITERAL_OBJECT_EVAL
                  || context_p->lit_object.type == LEXER_LITERAL_OBJECT_ARGUMENTS);

    /* Strict mode forbids binding the names "eval" and "arguments". */
    parser_raise_error (context_p,
                        (context_p->lit_object.type == LEXER_LITERAL_OBJECT_EVAL)
                        ? PARSER_ERR_EVAL_NOT_ALLOWED
                        : PARSER_ERR_ARGUMENTS_NOT_ALLOWED);
  }

  context_p->token.lit_location.type = literal_type;
} /* lexer_expect_identifier */
/** Canonical "get" identifier, used to recognize getter definitions. */
static const lexer_lit_location_t lexer_get_literal =
{
  (const uint8_t *) "get", 3, LEXER_IDENT_LITERAL, false
};

/** Canonical "set" identifier, used to recognize setter definitions. */
static const lexer_lit_location_t lexer_set_literal =
{
  (const uint8_t *) "set", 3, LEXER_IDENT_LITERAL, false
};
/**
 * Next token must be an object literal property id: an identifier,
 * a string or number literal, a get/set keyword, or (when allowed)
 * a closing right brace.
 */
void
lexer_expect_object_literal_id (parser_context_t *context_p, /**< context */
                                bool must_be_identifier) /**< only identifiers are accepted */
{
  skip_spaces (context_p);

  context_p->token.line = context_p->line;
  context_p->token.column = context_p->column;

  if (context_p->source_p < context_p->source_end_p)
  {
    bool create_literal_object = false;

    if (lit_char_is_identifier_start (context_p->source_p) || context_p->source_p[0] == LIT_CHAR_BACKSLASH)
    {
      lexer_parse_identifier (context_p, false);

      /* A three letter identifier not followed by ':' may be the
       * "get" or "set" accessor keyword. */
      if (!must_be_identifier
          && context_p->token.lit_location.length == 3)
      {
        skip_spaces (context_p);

        if (context_p->source_p < context_p->source_end_p
            && context_p->source_p[0] != LIT_CHAR_COLON)
        {
          if (lexer_compare_identifier_to_current (context_p, &lexer_get_literal))
          {
            context_p->token.type = LEXER_PROPERTY_GETTER;
            return;
          }
          else if (lexer_compare_identifier_to_current (context_p, &lexer_set_literal))
          {
            context_p->token.type = LEXER_PROPERTY_SETTER;
            return;
          }
        }
      }

      create_literal_object = true;
    }
    else if (context_p->source_p[0] == LIT_CHAR_DOUBLE_QUOTE
             || context_p->source_p[0] == LIT_CHAR_SINGLE_QUOTE)
    {
      lexer_parse_string (context_p);
      create_literal_object = true;
    }
    else if (!must_be_identifier && context_p->source_p[0] == LIT_CHAR_RIGHT_BRACE)
    {
      context_p->token.type = LEXER_RIGHT_BRACE;
      context_p->source_p += 1;
      context_p->column++;
      return;
    }
    else
    {
      const uint8_t *char_p = context_p->source_p;

      if (char_p[0] == LIT_CHAR_DOT)
      {
        char_p++;
      }

      /* Numeric property names (optionally starting with '.') are
       * parsed as number literals. */
      if (char_p < context_p->source_end_p
          && char_p[0] >= LIT_CHAR_0
          && char_p[0] <= LIT_CHAR_9)
      {
        lexer_parse_number (context_p);
        lexer_construct_number_object (context_p, false, false);
        return;
      }
    }

    if (create_literal_object)
    {
      /* Identifier and string property names are both stored as strings. */
      lexer_construct_literal_object (context_p,
                                      &context_p->token.lit_location,
                                      LEXER_STRING_LITERAL);
      return;
    }
  }

  parser_raise_error (context_p, PARSER_ERR_PROPERTY_IDENTIFIER_EXPECTED);
} /* lexer_expect_object_literal_id */
/**
 * Scan the next token, which must be an identifier. When a property name
 * is scanned, a "get"/"set" prefix is recognized and non-identifier
 * property names (literals, right brace) are accepted as well.
 *
 * Fix: the misspelled parameter name "propety_name" is corrected; C
 * callers pass arguments positionally, so the interface is unchanged.
 */
void
lexer_scan_identifier (parser_context_t *context_p, /**< context */
                       bool property_name) /**< true if a property name is scanned */
{
  skip_spaces (context_p);
  context_p->token.line = context_p->line;
  context_p->token.column = context_p->column;

  if (context_p->source_p < context_p->source_end_p
      && (lit_char_is_identifier_start (context_p->source_p) || context_p->source_p[0] == LIT_CHAR_BACKSLASH))
  {
    lexer_parse_identifier (context_p, false);

    /* A three letter identifier not followed by ':' may be the
     * "get" or "set" accessor keyword. */
    if (property_name && context_p->token.lit_location.length == 3)
    {
      skip_spaces (context_p);

      if (context_p->source_p < context_p->source_end_p
          && context_p->source_p[0] != LIT_CHAR_COLON)
      {
        if (lexer_compare_identifier_to_current (context_p, &lexer_get_literal))
        {
          context_p->token.type = LEXER_PROPERTY_GETTER;
        }
        else if (lexer_compare_identifier_to_current (context_p, &lexer_set_literal))
        {
          context_p->token.type = LEXER_PROPERTY_SETTER;
        }
      }
    }
    return;
  }

  if (property_name)
  {
    lexer_next_token (context_p);

    if (context_p->token.type == LEXER_LITERAL
        || context_p->token.type == LEXER_RIGHT_BRACE)
    {
      return;
    }
  }

  parser_raise_error (context_p, PARSER_ERR_IDENTIFIER_EXPECTED);
} /* lexer_scan_identifier */
/**
 * Compares the given identifier to that which is the current token
 * in the parser context.
 *
 * Either side may contain \uXXXX escape sequences (has_escape); escapes
 * are decoded and compared through their UTF-8 byte representation.
 *
 * Fixes:
 *  - In the both-escaped branch the remaining byte counter was
 *    INCREMENTED (count +=), so the do-while (count > 0) loop could never
 *    terminate and read past both buffers. The counter must decrease by
 *    the decoded character's UTF-8 length, exactly as the mixed branch
 *    below already does.
 *  - "return 0" replaced with "return false" in a bool function.
 *
 * @return true if the input identifiers are the same
 */
bool
lexer_compare_identifier_to_current (parser_context_t *context_p, /**< context */
                                     const lexer_lit_location_t *right) /**< identifier */
{
  lexer_lit_location_t *left = &context_p->token.lit_location;
  const uint8_t *left_p;
  const uint8_t *right_p;
  size_t count;

  JERRY_ASSERT (left->length > 0 && right->length > 0);

  if (left->length != right->length)
  {
    return false;
  }

  /* Fast path: neither side contains escapes. */
  if (!left->has_escape && !right->has_escape)
  {
    return memcmp (left->char_p, right->char_p, left->length) == 0;
  }

  left_p = left->char_p;
  right_p = right->char_p;
  count = left->length;

  do
  {
    uint8_t utf8_buf[3];
    size_t utf8_len, offset;

    /* Backslash cannot be part of a multibyte UTF-8 character. */
    if (*left_p != LIT_CHAR_BACKSLASH && *right_p != LIT_CHAR_BACKSLASH)
    {
      if (*left_p++ != *right_p++)
      {
        return false;
      }
      count--;
      continue;
    }

    if (*left_p == LIT_CHAR_BACKSLASH && *right_p == LIT_CHAR_BACKSLASH)
    {
      uint16_t left_chr = lexer_hex_to_character (context_p, left_p, 6);

      if (left_chr != lexer_hex_to_character (context_p, right_p, 6))
      {
        return false;
      }

      left_p += 6;
      right_p += 6;
      /* The decoded character accounts for its UTF-8 length in the
       * literal length, so the remaining count must DECREASE here. */
      count -= lit_char_get_utf8_length (left_chr);
      continue;
    }

    /* One character is encoded as unicode sequence. */
    if (*right_p == LIT_CHAR_BACKSLASH)
    {
      /* The pointers can be swapped. */
      const uint8_t *swap_p = left_p;
      left_p = right_p;
      right_p = swap_p;
    }

    utf8_len = lit_char_to_utf8_bytes (utf8_buf, lexer_hex_to_character (context_p, left_p, 6));
    JERRY_ASSERT (utf8_len > 0);
    count -= utf8_len;
    offset = 0;

    do
    {
      if (utf8_buf[offset] != *right_p++)
      {
        return false;
      }
      offset++;
    }
    while (offset < utf8_len);

    left_p += 6;
  }
  while (count > 0);

  return true;
} /* lexer_compare_identifier_to_current */
/**
* @}
* @}
* @}
*/
#endif /* JERRY_JS_PARSER */
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3380_0 |
crossvul-cpp_data_bad_4849_3 | /*
* Copyright (c) 1999-2000 Image Power, Inc. and the University of
* British Columbia.
* Copyright (c) 2001-2004 Michael David Adams.
* All rights reserved.
*/
/* __START_OF_JASPER_LICENSE__
*
* JasPer License Version 2.0
*
* Copyright (c) 2001-2006 Michael David Adams
* Copyright (c) 1999-2000 Image Power, Inc.
* Copyright (c) 1999-2000 The University of British Columbia
*
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person (the
* "User") obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the
* following conditions:
*
* 1. The above copyright notices and this permission notice (which
* includes the disclaimer below) shall be included in all copies or
* substantial portions of the Software.
*
* 2. The name of a copyright holder shall not be used to endorse or
* promote products derived from the Software without specific prior
* written permission.
*
* THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS
* LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER
* THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
* "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
* INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
* FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
* WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE
* PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE
* THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY.
* EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS
* BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL
* PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS
* GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE
* ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE
* IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL
* SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES,
* AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL
* SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH
* THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH,
* PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH
* RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY
* EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES.
*
* __END_OF_JASPER_LICENSE__
*/
/*
* Tree-Structured Filter Bank (TSFB) Library
*
* $Id$
*/
/******************************************************************************\
* Includes.
\******************************************************************************/
#include <assert.h>
#include "jasper/jas_malloc.h"
#include "jasper/jas_seq.h"
#include "jpc_tsfb.h"
#include "jpc_cod.h"
#include "jpc_cs.h"
#include "jpc_util.h"
#include "jpc_math.h"
void jpc_tsfb_getbands2(jpc_tsfb_t *tsfb, int locxstart, int locystart,
int xstart, int ystart, int xend, int yend, jpc_tsfb_band_t **bands,
int numlvls);
/******************************************************************************\
*
\******************************************************************************/
/* Create a tree-structured filter bank object for the requested QMFB
   (irreversible 9/7 or reversible 5/3) and decomposition level count.
   Returns a null pointer on allocation failure. */
jpc_tsfb_t *jpc_cod_gettsfb(int qmfbid, int numlvls)
{
	jpc_tsfb_t *tsfb = malloc(sizeof(jpc_tsfb_t));
	if (!tsfb) {
		return 0;
	}
	tsfb->numlvls = numlvls;
	tsfb->qmfb = 0;
	if (numlvls > 0) {
		if (qmfbid == JPC_COX_INS) {
			tsfb->qmfb = &jpc_ns_qmfb2d;
		} else {
			/* JPC_COX_RFT and any unrecognized id. */
			tsfb->qmfb = &jpc_ft_qmfb2d;
		}
	}
	return tsfb;
}
/* Destroy a TSFB object created by jpc_cod_gettsfb. */
void jpc_tsfb_destroy(jpc_tsfb_t *tsfb)
{
	free(tsfb);
}
/* Perform the forward (analysis) transform on a 2-D sequence in place.
   With zero decomposition levels the transform is the identity. */
int jpc_tsfb_analyze(jpc_tsfb_t *tsfb, jas_seq2d_t *a)
{
	if (tsfb->numlvls <= 0) {
		return 0;
	}
	return jpc_tsfb_analyze2(tsfb, jas_seq2d_getref(a, jas_seq2d_xstart(a),
	  jas_seq2d_ystart(a)), jas_seq2d_xstart(a), jas_seq2d_ystart(a),
	  jas_seq2d_width(a), jas_seq2d_height(a), jas_seq2d_rowstep(a),
	  tsfb->numlvls - 1);
}
/* Recursive worker for jpc_tsfb_analyze: filters the current resolution
   level, then recurses into the low-pass (LL) subband for the remaining
   levels. Returns 0 on success, -1 on failure. */
int jpc_tsfb_analyze2(jpc_tsfb_t *tsfb, int *a, int xstart, int ystart,
  int width, int height, int stride, int numlvls)
{
	/* Degenerate areas contribute nothing. */
	if (width <= 0 || height <= 0) {
		return 0;
	}
	if ((*tsfb->qmfb->analyze)(a, xstart, ystart, width, height, stride)) {
		return -1;
	}
	if (numlvls <= 0) {
		return 0;
	}
	{
		int lo_xstart = JPC_CEILDIVPOW2(xstart, 1);
		int lo_ystart = JPC_CEILDIVPOW2(ystart, 1);
		int lo_width = JPC_CEILDIVPOW2(xstart + width, 1) - lo_xstart;
		int lo_height = JPC_CEILDIVPOW2(ystart + height, 1) - lo_ystart;
		return jpc_tsfb_analyze2(tsfb, a, lo_xstart, lo_ystart,
		  lo_width, lo_height, stride, numlvls - 1) ? -1 : 0;
	}
}
/* Perform the inverse (synthesis) transform on a 2-D sequence in place.
 *
 * Fix: an empty sequence must be left untouched. Previously
 * jpc_tsfb_synthesize2 was invoked even for a zero-size sequence, handing
 * it the result of jas_seq2d_getref on an empty matrix (an invalid data
 * pointer); crafted code streams could trigger this (matches the upstream
 * JasPer hardening of this function). */
int jpc_tsfb_synthesize(jpc_tsfb_t *tsfb, jas_seq2d_t *a)
{
	return (tsfb->numlvls > 0 && jas_seq2d_width(a) > 0 &&
	  jas_seq2d_height(a) > 0) ? jpc_tsfb_synthesize2(tsfb,
	  jas_seq2d_getref(a, jas_seq2d_xstart(a), jas_seq2d_ystart(a)),
	  jas_seq2d_xstart(a), jas_seq2d_ystart(a), jas_seq2d_width(a),
	  jas_seq2d_height(a), jas_seq2d_rowstep(a), tsfb->numlvls - 1) : 0;
}
/* Recursive worker for jpc_tsfb_synthesize: reconstructs the coarser
   (LL) level first, then applies the inverse filter pair at the current
   level. Returns 0 on success, -1 on failure. */
int jpc_tsfb_synthesize2(jpc_tsfb_t *tsfb, int *a, int xstart, int ystart,
  int width, int height, int stride, int numlvls)
{
	if (numlvls > 0) {
		int lo_xstart = JPC_CEILDIVPOW2(xstart, 1);
		int lo_ystart = JPC_CEILDIVPOW2(ystart, 1);
		int lo_width = JPC_CEILDIVPOW2(xstart + width, 1) - lo_xstart;
		int lo_height = JPC_CEILDIVPOW2(ystart + height, 1) - lo_ystart;
		if (jpc_tsfb_synthesize2(tsfb, a, lo_xstart, lo_ystart,
		  lo_width, lo_height, stride, numlvls - 1)) {
			return -1;
		}
	}
	/* Degenerate areas need no filtering. */
	if (width <= 0 || height <= 0) {
		return 0;
	}
	return (*tsfb->qmfb->synthesize)(a, xstart, ystart, width, height,
	  stride) ? -1 : 0;
}
/* Enumerate the subbands produced by the TSFB over the given region into
   the caller-provided array. Returns the number of bands written. */
int jpc_tsfb_getbands(jpc_tsfb_t *tsfb, uint_fast32_t xstart,
  uint_fast32_t ystart, uint_fast32_t xend, uint_fast32_t yend,
  jpc_tsfb_band_t *bands)
{
	jpc_tsfb_band_t *cur = bands;
	if (tsfb->numlvls > 0) {
		/* Collect the LL/HL/LH/HH bands of every level recursively. */
		jpc_tsfb_getbands2(tsfb, xstart, ystart, xstart, ystart,
		  xend, yend, &cur, tsfb->numlvls);
	} else {
		/* Identity transform: the whole region is one LL band. */
		cur->xstart = xstart;
		cur->ystart = ystart;
		cur->xend = xend;
		cur->yend = yend;
		cur->locxstart = xstart;
		cur->locystart = ystart;
		cur->locxend = cur->locxstart + cur->xend - cur->xstart;
		cur->locyend = cur->locystart + cur->yend - cur->ystart;
		cur->orient = JPC_TSFB_LL;
		cur->synenergywt = JPC_FIX_ONE;
		++cur;
	}
	return cur - bands;
}
/* Recursive helper for jpc_tsfb_getbands: descends to the coarsest level
 * first, then emits the HL, LH and HH bands of each level on the way back
 * up; the recursion terminus emits the final LL band. The *bands cursor
 * is advanced past every band written. */
void jpc_tsfb_getbands2(jpc_tsfb_t *tsfb, int locxstart, int locystart,
  int xstart, int ystart, int xend, int yend, jpc_tsfb_band_t **bands,
  int numlvls)
{
	int newxstart;
	int newystart;
	int newxend;
	int newyend;
	jpc_tsfb_band_t *band;

	/* Bounds of the low-pass (LL) subband at the next coarser level. */
	newxstart = JPC_CEILDIVPOW2(xstart, 1);
	newystart = JPC_CEILDIVPOW2(ystart, 1);
	newxend = JPC_CEILDIVPOW2(xend, 1);
	newyend = JPC_CEILDIVPOW2(yend, 1);
	if (numlvls > 0) {
		jpc_tsfb_getbands2(tsfb, locxstart, locystart, newxstart, newystart,
		  newxend, newyend, bands, numlvls - 1);

		/* HL band (horizontally high-pass, vertically low-pass). */
		band = *bands;
		band->xstart = JPC_FLOORDIVPOW2(xstart, 1);
		band->ystart = newystart;
		band->xend = JPC_FLOORDIVPOW2(xend, 1);
		band->yend = newyend;
		band->locxstart = locxstart + newxend - newxstart;
		band->locystart = locystart;
		band->locxend = band->locxstart + band->xend - band->xstart;
		band->locyend = band->locystart + band->yend - band->ystart;
		band->orient = JPC_TSFB_HL;
		band->synenergywt = jpc_dbltofix(tsfb->qmfb->hpenergywts[
		  tsfb->numlvls - numlvls] * tsfb->qmfb->lpenergywts[
		  tsfb->numlvls - numlvls]);
		++(*bands);

		/* LH band (horizontally low-pass, vertically high-pass). */
		band = *bands;
		band->xstart = newxstart;
		band->ystart = JPC_FLOORDIVPOW2(ystart, 1);
		band->xend = newxend;
		band->yend = JPC_FLOORDIVPOW2(yend, 1);
		band->locxstart = locxstart;
		band->locystart = locystart + newyend - newystart;
		band->locxend = band->locxstart + band->xend - band->xstart;
		band->locyend = band->locystart + band->yend - band->ystart;
		band->orient = JPC_TSFB_LH;
		band->synenergywt = jpc_dbltofix(tsfb->qmfb->lpenergywts[
		  tsfb->numlvls - numlvls] * tsfb->qmfb->hpenergywts[
		  tsfb->numlvls - numlvls]);
		++(*bands);

		/* HH band (high-pass in both directions). */
		band = *bands;
		band->xstart = JPC_FLOORDIVPOW2(xstart, 1);
		band->ystart = JPC_FLOORDIVPOW2(ystart, 1);
		band->xend = JPC_FLOORDIVPOW2(xend, 1);
		band->yend = JPC_FLOORDIVPOW2(yend, 1);
		band->locxstart = locxstart + newxend - newxstart;
		band->locystart = locystart + newyend - newystart;
		band->locxend = band->locxstart + band->xend - band->xstart;
		band->locyend = band->locystart + band->yend - band->ystart;
		band->orient = JPC_TSFB_HH;
		band->synenergywt = jpc_dbltofix(tsfb->qmfb->hpenergywts[
		  tsfb->numlvls - numlvls] * tsfb->qmfb->hpenergywts[
		  tsfb->numlvls - numlvls]);
		++(*bands);
	} else {
		/* Coarsest level: emit the residual LL band. */
		band = *bands;
		band->xstart = xstart;
		band->ystart = ystart;
		band->xend = xend;
		band->yend = yend;
		band->locxstart = locxstart;
		band->locystart = locystart;
		band->locxend = band->locxstart + band->xend - band->xstart;
		band->locyend = band->locystart + band->yend - band->ystart;
		band->orient = JPC_TSFB_LL;
		band->synenergywt = jpc_dbltofix(tsfb->qmfb->lpenergywts[
		  tsfb->numlvls - numlvls - 1] * tsfb->qmfb->lpenergywts[
		  tsfb->numlvls - numlvls - 1]);
		++(*bands);
	}
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_4849_3 |
crossvul-cpp_data_bad_1863_0 | /*
* algif_skcipher: User-space interface for skcipher algorithms
*
* This file provides the user-space API for symmetric key ciphers.
*
* Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
/* One chunk of the transmit scatterlist; chunks are chained together
 * and linked on skcipher_ctx::tsgl. */
struct skcipher_sg_list {
	struct list_head list;

	int cur;			/* number of used entries in sg[] */

	struct scatterlist sg[0];	/* flexible array of entries */
};

/* Per-socket state of an skcipher operation. */
struct skcipher_ctx {
	struct list_head tsgl;		/* TX data: list of skcipher_sg_list */
	struct af_alg_sgl rsgl;		/* RX scatterlist for synchronous reads */

	void *iv;			/* IV buffer (ivsize bytes) */

	struct af_alg_completion completion;	/* sync request completion */

	atomic_t inflight;		/* outstanding async requests */
	size_t used;			/* bytes currently queued in tsgl */

	unsigned int len;		/* size of this context allocation */
	bool more;			/* more data expected (MSG_MORE) */
	bool merge;			/* next send may append to last sg entry */
	bool enc;			/* encrypt (true) or decrypt (false) */

	struct skcipher_request req;	/* request holding the tfm; see GET_* */
};

/* Per-request bookkeeping for one RX scatterlist of an async request. */
struct skcipher_async_rsgl {
	struct af_alg_sgl sgl;
	struct list_head list;
};

/* State attached to one asynchronous cipher request. */
struct skcipher_async_req {
	struct kiocb *iocb;			/* completion target */
	struct skcipher_async_rsgl first_sgl;	/* inline first RX sgl */
	struct list_head list;			/* additional RX sgls */
	struct scatterlist *tsg;		/* private copy of TX entries */
	char iv[];				/* IV copy (ivsize bytes) */
};

/* The skcipher_async_req is laid out directly behind the tfm's request
 * context within the same allocation. */
#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
	crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))

#define GET_REQ_SIZE(ctx) \
	crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))

#define GET_IV_SIZE(ctx) \
	crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))

/* Scatterlist entries per chunk, sized so one chunk fits in a page;
 * one extra entry is reserved for chaining. */
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
		      sizeof(struct scatterlist) - 1)
/* Release all resources referenced by an async request: every RX sgl on
 * the list (the inline first_sgl itself is not kfreed) plus the private
 * copy of the TX scatterlist and the page references it holds
 * (presumably taken when the TX list was copied — see the recvmsg path). */
static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
{
	struct skcipher_async_rsgl *rsgl, *tmp;
	struct scatterlist *sgl;
	struct scatterlist *sg;
	int i, n;

	list_for_each_entry_safe(rsgl, tmp, &sreq->list, list) {
		af_alg_free_sg(&rsgl->sgl);
		/* first_sgl is embedded in sreq; only extras were allocated. */
		if (rsgl != &sreq->first_sgl)
			kfree(rsgl);
	}
	sgl = sreq->tsg;
	n = sg_nents(sgl);
	for_each_sg(sgl, sg, n, i)
		put_page(sg_page(sg));

	kfree(sreq->tsg);
}
/* Completion callback for asynchronous cipher requests: tears down the
 * request's scatterlists and completes the user's iocb with the result. */
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
	struct sock *sk = req->data;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
	struct kiocb *iocb = sreq->iocb;

	atomic_dec(&ctx->inflight);
	skcipher_free_async_sgls(sreq);
	kfree(req);
	iocb->ki_complete(iocb, err, err);
}
/* Bytes of send buffer still available for queuing TX data
 * (at least one page of budget is always granted). */
static inline int skcipher_sndbuf(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	return max_t(int, max_t(int, sk->sk_sndbuf & PAGE_MASK, PAGE_SIZE) -
			  ctx->used, 0);
}

/* Writable when at least one full page of send buffer is free. */
static inline bool skcipher_writable(struct sock *sk)
{
	return PAGE_SIZE <= skcipher_sndbuf(sk);
}
/* Ensure the last TX chunk has room for another scatterlist entry,
 * allocating and chaining a new chunk when needed.
 * Returns 0 on success or -ENOMEM. */
static int skcipher_alloc_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg = NULL;

	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
	if (!list_empty(&ctx->tsgl))
		sg = sgl->sg;

	if (!sg || sgl->cur >= MAX_SGL_ENTS) {
		/* The +1 entry is reserved for chaining to the next chunk. */
		sgl = sock_kmalloc(sk, sizeof(*sgl) +
				       sizeof(sgl->sg[0]) * (MAX_SGL_ENTS + 1),
				   GFP_KERNEL);
		if (!sgl)
			return -ENOMEM;

		sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
		sgl->cur = 0;

		if (sg)
			sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);

		list_add_tail(&sgl->list, &ctx->tsgl);
	}

	return 0;
}
/* Consume @used bytes from the front of the TX queue, dropping page
 * references when @put is set and freeing fully drained chunks. */
static void skcipher_pull_sgl(struct sock *sk, size_t used, int put)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int i;

	while (!list_empty(&ctx->tsgl)) {
		sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list,
				       list);
		sg = sgl->sg;

		for (i = 0; i < sgl->cur; i++) {
			size_t plen = min_t(size_t, used, sg[i].length);

			/* Entry was already released on an earlier pass. */
			if (!sg_page(sg + i))
				continue;

			sg[i].length -= plen;
			sg[i].offset += plen;

			used -= plen;
			ctx->used -= plen;

			/* Partially consumed entry: stop here. */
			if (sg[i].length)
				return;
			if (put)
				put_page(sg_page(sg + i));
			sg_assign_page(sg + i, NULL);
		}

		/* Whole chunk drained: unlink and free it. */
		list_del(&sgl->list);
		sock_kfree_s(sk, sgl,
			     sizeof(*sgl) + sizeof(sgl->sg[0]) *
					    (MAX_SGL_ENTS + 1));
	}

	if (!ctx->used)
		ctx->merge = 0;
}

/* Drop the entire TX queue, releasing all page references. */
static void skcipher_free_sgl(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;

	skcipher_pull_sgl(sk, ctx->used, 1);
}
/* Sleep until the send buffer has at least one free page.
 * Returns 0 on success, -EAGAIN for non-blocking sockets,
 * -ERESTARTSYS when interrupted by a signal. */
static int skcipher_wait_for_wmem(struct sock *sk, unsigned flags)
{
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT)
		return -EAGAIN;

	sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, skcipher_writable(sk))) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	return err;
}

/* Wake up tasks blocked on the socket once send-buffer space frees up. */
static void skcipher_wmem_wakeup(struct sock *sk)
{
	struct socket_wq *wq;

	if (!skcipher_writable(sk))
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLIN |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	rcu_read_unlock();
}
/* Sleep until TX data has been queued (ctx->used != 0).
 * Returns 0 on success, -EAGAIN for non-blocking sockets,
 * -ERESTARTSYS when interrupted by a signal. */
static int skcipher_wait_for_data(struct sock *sk, unsigned flags)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	long timeout;
	DEFINE_WAIT(wait);
	int err = -ERESTARTSYS;

	if (flags & MSG_DONTWAIT) {
		return -EAGAIN;
	}

	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	for (;;) {
		if (signal_pending(current))
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (sk_wait_event(sk, &timeout, ctx->used)) {
			err = 0;
			break;
		}
	}
	finish_wait(sk_sleep(sk), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	return err;
}

/* Wake up tasks waiting for TX data to become available. */
static void skcipher_data_wakeup(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct socket_wq *wq;

	if (!ctx->used)
		return;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
							   POLLRDNORM |
							   POLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	rcu_read_unlock();
}
/* Queue plaintext/ciphertext from user space for a cipher operation.
 * The first sendmsg() of an operation may carry an ALG_OP_* control
 * message selecting the direction and an IV. Data is copied into the
 * TX scatterlist, merging into the last partially filled page when
 * possible. Returns bytes copied, or a negative error. */
static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct skcipher_sg_list *sgl;
	struct af_alg_control con = {};
	long copied = 0;
	bool enc = 0;
	bool init = 0;
	int err;
	int i;

	if (msg->msg_controllen) {
		err = af_alg_cmsg_send(msg, &con);
		if (err)
			return err;

		init = 1;
		switch (con.op) {
		case ALG_OP_ENCRYPT:
			enc = 1;
			break;
		case ALG_OP_DECRYPT:
			enc = 0;
			break;
		default:
			return -EINVAL;
		}

		/* A supplied IV must match the cipher's IV size exactly. */
		if (con.iv && con.iv->ivlen != ivsize)
			return -EINVAL;
	}

	err = -EINVAL;

	lock_sock(sk);
	/* A new operation may not start while unread data is pending. */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (init) {
		ctx->enc = enc;
		if (con.iv)
			memcpy(ctx->iv, con.iv->iv, ivsize);
	}

	while (size) {
		struct scatterlist *sg;
		unsigned long len = size;
		size_t plen;

		if (ctx->merge) {
			/* Append into the last, partially filled page. */
			sgl = list_entry(ctx->tsgl.prev,
					 struct skcipher_sg_list, list);
			sg = sgl->sg + sgl->cur - 1;
			len = min_t(unsigned long, len,
				    PAGE_SIZE - sg->offset - sg->length);

			err = memcpy_from_msg(page_address(sg_page(sg)) +
					      sg->offset + sg->length,
					      msg, len);
			if (err)
				goto unlock;

			sg->length += len;
			/* Keep merging until the page is exactly full. */
			ctx->merge = (sg->offset + sg->length) &
				     (PAGE_SIZE - 1);

			ctx->used += len;
			copied += len;
			size -= len;
			continue;
		}

		if (!skcipher_writable(sk)) {
			err = skcipher_wait_for_wmem(sk, msg->msg_flags);
			if (err)
				goto unlock;
		}

		/* Never queue more than the remaining send buffer allows. */
		len = min_t(unsigned long, len, skcipher_sndbuf(sk));

		err = skcipher_alloc_sgl(sk);
		if (err)
			goto unlock;

		sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);
		sg = sgl->sg;
		sg_unmark_end(sg + sgl->cur);
		do {
			i = sgl->cur;
			plen = min_t(size_t, len, PAGE_SIZE);

			sg_assign_page(sg + i, alloc_page(GFP_KERNEL));
			err = -ENOMEM;
			if (!sg_page(sg + i))
				goto unlock;

			err = memcpy_from_msg(page_address(sg_page(sg + i)),
					      msg, plen);
			if (err) {
				/* Roll back the freshly allocated page. */
				__free_page(sg_page(sg + i));
				sg_assign_page(sg + i, NULL);
				goto unlock;
			}

			sg[i].length = plen;
			len -= plen;
			ctx->used += plen;
			copied += plen;
			size -= plen;
			sgl->cur++;
		} while (len && sgl->cur < MAX_SGL_ENTS);

		if (!size)
			sg_mark_end(sg + sgl->cur - 1);

		/* A partially filled last page allows merging next time. */
		ctx->merge = plen & (PAGE_SIZE - 1);
	}

	err = 0;

	ctx->more = msg->msg_flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
/* Zero-copy variant of sendmsg: link the caller's page directly into
 * the TX scatterlist instead of copying its contents. */
static ssize_t skcipher_sendpage(struct socket *sock, struct page *page,
				 int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	int err = -EINVAL;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	lock_sock(sk);
	/* A new operation may not start while unread data is pending. */
	if (!ctx->more && ctx->used)
		goto unlock;

	if (!size)
		goto done;

	if (!skcipher_writable(sk)) {
		err = skcipher_wait_for_wmem(sk, flags);
		if (err)
			goto unlock;
	}

	err = skcipher_alloc_sgl(sk);
	if (err)
		goto unlock;

	/* A foreign page cannot be appended to; disable merging. */
	ctx->merge = 0;
	sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list);

	if (sgl->cur)
		sg_unmark_end(sgl->sg + sgl->cur - 1);

	sg_mark_end(sgl->sg + sgl->cur);
	/* Reference released later by skcipher_pull_sgl(). */
	get_page(page);
	sg_set_page(sgl->sg + sgl->cur, page, size, offset);
	sgl->cur++;
	ctx->used += size;

done:
	ctx->more = flags & MSG_MORE;

unlock:
	skcipher_data_wakeup(sk);
	release_sock(sk);

	return err ?: size;
}
/*
 * Count the scatterlist entries queued on all TX lists of "ctx",
 * ignoring leading zero-length slots of each list.
 */
static int skcipher_all_sg_nents(struct skcipher_ctx *ctx)
{
	struct skcipher_sg_list *entry;
	int total = 0;

	list_for_each_entry(entry, &ctx->tsgl, list) {
		struct scatterlist *first = entry->sg;

		/* Skip the already-consumed (zero-length) leading slots. */
		for (; !first->length; first++)
			;
		total += sg_nents(first);
	}

	return total;
}
/*
 * Asynchronous recvmsg(): build a standalone skcipher request that owns
 * copies of the TX scatterlist entries and the caller's RX pages, submit it,
 * and return -EIOCBQUEUED when it completes later via skcipher_async_cb().
 * Returns bytes processed / -EIOCBQUEUED on success, negative error on
 * failure.
 */
static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
				  int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	struct skcipher_async_req *sreq;
	struct skcipher_request *req;
	struct skcipher_async_rsgl *last_rsgl = NULL;
	unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
	unsigned int reqlen = sizeof(struct skcipher_async_req) +
				GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
	int err = -ENOMEM;
	bool mark = false;

	lock_sock(sk);
	req = kmalloc(reqlen, GFP_KERNEL);
	if (unlikely(!req))
		goto unlock;

	sreq = GET_SREQ(req, ctx);
	sreq->iocb = msg->msg_iocb;
	memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
	INIT_LIST_HEAD(&sreq->list);
	sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
	if (unlikely(!sreq->tsg)) {
		kfree(req);
		goto unlock;
	}
	sg_init_table(sreq->tsg, tx_nents);
	/* The request gets its own IV copy so the socket IV can keep being
	 * updated by other requests. */
	memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
	skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      skcipher_async_cb, sk);

	while (iov_iter_count(&msg->msg_iter)) {
		struct skcipher_async_rsgl *rsgl;
		int used;

		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto free;
		}
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		used = min_t(unsigned long, ctx->used,
			     iov_iter_count(&msg->msg_iter));
		used = min_t(unsigned long, used, sg->length);

		if (txbufs == tx_nents) {
			struct scatterlist *tmp;
			int x;

			/* Ran out of tx slots in async request
			 * need to expand */
			tmp = kcalloc(tx_nents * 2, sizeof(*tmp),
				      GFP_KERNEL);
			if (!tmp) {
				/* Fix: "err" may still hold the non-negative
				 * result of a previous af_alg_make_sg() call
				 * at this point; report the allocation
				 * failure explicitly. */
				err = -ENOMEM;
				goto free;
			}

			sg_init_table(tmp, tx_nents * 2);
			for (x = 0; x < tx_nents; x++)
				sg_set_page(&tmp[x], sg_page(&sreq->tsg[x]),
					    sreq->tsg[x].length,
					    sreq->tsg[x].offset);
			kfree(sreq->tsg);
			sreq->tsg = tmp;
			tx_nents *= 2;
			mark = true;
		}
		/* Need to take over the tx sgl from ctx
		 * to the asynch req - these sgls will be freed later */
		sg_set_page(sreq->tsg + txbufs++, sg_page(sg), sg->length,
			    sg->offset);

		if (list_empty(&sreq->list)) {
			rsgl = &sreq->first_sgl;
			list_add_tail(&rsgl->list, &sreq->list);
		} else {
			rsgl = kmalloc(sizeof(*rsgl), GFP_KERNEL);
			if (!rsgl) {
				err = -ENOMEM;
				goto free;
			}
			list_add_tail(&rsgl->list, &sreq->list);
		}

		used = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, used);
		err = used;
		if (used < 0)
			goto free;
		if (last_rsgl)
			af_alg_link_sg(&last_rsgl->sgl, &rsgl->sgl);

		last_rsgl = rsgl;
		len += used;
		skcipher_pull_sgl(sk, used, 0);
		iov_iter_advance(&msg->msg_iter, used);
	}

	if (mark)
		sg_mark_end(sreq->tsg + txbufs - 1);

	skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
				   len, sreq->iv);
	err = ctx->enc ? crypto_skcipher_encrypt(req) :
			 crypto_skcipher_decrypt(req);
	if (err == -EINPROGRESS) {
		atomic_inc(&ctx->inflight);
		err = -EIOCBQUEUED;
		goto unlock;
	}
free:
	skcipher_free_async_sgls(sreq);
	kfree(req);
unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);
	return err;
}
/*
 * Synchronous recvmsg(): encrypt/decrypt queued TX data into the caller's
 * buffers, blocking until data is available.  Returns the number of bytes
 * produced, or a negative error code.
 */
static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
				 int flags)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
		&ctx->req));
	struct skcipher_sg_list *sgl;
	struct scatterlist *sg;
	int err = -EAGAIN;
	int used;
	long copied = 0;

	lock_sock(sk);
	while (msg_data_left(msg)) {
		if (!ctx->used) {
			err = skcipher_wait_for_data(sk, flags);
			if (err)
				goto unlock;
		}

		used = min_t(unsigned long, ctx->used, msg_data_left(msg));

		used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used);
		err = used;
		if (err < 0)
			goto unlock;

		/* Only whole blocks can be processed unless this is the
		 * final chunk of the request. */
		if (ctx->more || used < ctx->used)
			used -= used % bs;

		err = -EINVAL;
		if (!used)
			goto free;

		/*
		 * Fix (CWE-476): load the TX scatterlist only after
		 * skcipher_wait_for_data() above has guaranteed ctx->tsgl is
		 * non-empty.  Previously list_first_entry() was called before
		 * that check, handing back a bogus pointer that was
		 * dereferenced by the length-skipping loop below.
		 */
		sgl = list_first_entry(&ctx->tsgl,
				       struct skcipher_sg_list, list);
		sg = sgl->sg;

		while (!sg->length)
			sg++;

		skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
					   ctx->iv);

		err = af_alg_wait_for_completion(
			ctx->enc ?
				crypto_skcipher_encrypt(&ctx->req) :
				crypto_skcipher_decrypt(&ctx->req),
			&ctx->completion);

free:
		af_alg_free_sg(&ctx->rsgl);

		if (err)
			goto unlock;

		copied += used;
		skcipher_pull_sgl(sk, used, 1);
		iov_iter_advance(&msg->msg_iter, used);
	}

	err = 0;

unlock:
	skcipher_wmem_wakeup(sk);
	release_sock(sk);

	return copied ?: err;
}
/*
 * recvmsg() entry point: dispatch to the async implementation for a
 * non-synchronous iocb, otherwise to the blocking one.
 */
static int skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t ignored, int flags)
{
	if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb))
		return skcipher_recvmsg_async(sock, msg, flags);

	return skcipher_recvmsg_sync(sock, msg, flags);
}
/*
 * poll(): readable while processed data is queued, writable while the TX
 * queue has room.
 */
static unsigned int skcipher_poll(struct file *file, struct socket *sock,
				  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	unsigned int events = 0;

	sock_poll_wait(file, sk_sleep(sk), wait);

	if (ctx->used)
		events |= POLLIN | POLLRDNORM;

	if (skcipher_writable(sk))
		events |= POLLOUT | POLLWRNORM | POLLWRBAND;

	return events;
}
/*
 * proto_ops for an skcipher algif data socket.  Only the data-path entry
 * points (sendmsg/sendpage/recvmsg/poll) and release are implemented; every
 * other socket operation is rejected via the sock_no_* stubs.
 */
static struct proto_ops algif_skcipher_ops = {
	.family		=	PF_ALG,

	.connect	=	sock_no_connect,
	.socketpair	=	sock_no_socketpair,
	.getname	=	sock_no_getname,
	.ioctl		=	sock_no_ioctl,
	.listen		=	sock_no_listen,
	.shutdown	=	sock_no_shutdown,
	.getsockopt	=	sock_no_getsockopt,
	.mmap		=	sock_no_mmap,
	.bind		=	sock_no_bind,
	.accept		=	sock_no_accept,
	.setsockopt	=	sock_no_setsockopt,

	.release	=	af_alg_release,
	.sendmsg	=	skcipher_sendmsg,
	.sendpage	=	skcipher_sendpage,
	.recvmsg	=	skcipher_recvmsg,
	.poll		=	skcipher_poll,
};
/* af_alg_type.bind: allocate the skcipher transform named "name". */
static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
	return crypto_alloc_skcipher(name, type, mask);
}

/* af_alg_type.release: free the transform allocated by skcipher_bind(). */
static void skcipher_release(void *private)
{
	crypto_free_skcipher(private);
}

/* af_alg_type.setkey: forward the user-supplied key to the transform. */
static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
	return crypto_skcipher_setkey(private, key, keylen);
}
/*
 * Busy-wait (polling every 100ms, at most 100 times) for in-flight async
 * requests to drain before the socket context is torn down.
 */
static void skcipher_wait(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	int tries;

	for (tries = 0; atomic_read(&ctx->inflight) && tries < 100; tries++)
		msleep(100);
}
/*
 * sk_destruct callback: drain in-flight async requests, then release the
 * queued TX pages, the IV (zeroed, it may contain key-related material) and
 * the context itself, and finally drop the reference on the parent socket.
 * The order matters: ctx fields are used up to the last sock_kfree_s().
 */
static void skcipher_sock_destruct(struct sock *sk)
{
	struct alg_sock *ask = alg_sk(sk);
	struct skcipher_ctx *ctx = ask->private;
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

	if (atomic_read(&ctx->inflight))
		skcipher_wait(sk);

	skcipher_free_sgl(sk);
	/* sock_kzfree_s() wipes the IV before freeing it. */
	sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
	sock_kfree_s(sk, ctx, ctx->len);
	af_alg_release_parent(sk);
}
/*
 * af_alg_type.accept: set up the per-connection skcipher context on the
 * accepted socket.  "private" is the transform allocated by skcipher_bind().
 * Returns 0 on success or -ENOMEM.
 */
static int skcipher_accept_parent(void *private, struct sock *sk)
{
	struct skcipher_ctx *ctx;
	struct alg_sock *ask = alg_sk(sk);
	/* The request structure is embedded at the end of the context. */
	unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private);

	ctx = sock_kmalloc(sk, len, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private),
			       GFP_KERNEL);
	if (!ctx->iv) {
		sock_kfree_s(sk, ctx, len);
		return -ENOMEM;
	}

	/* IV defaults to all-zero until userspace sends one via cmsg. */
	memset(ctx->iv, 0, crypto_skcipher_ivsize(private));

	INIT_LIST_HEAD(&ctx->tsgl);
	ctx->len = len;
	ctx->used = 0;
	ctx->more = 0;
	ctx->merge = 0;
	ctx->enc = 0;
	atomic_set(&ctx->inflight, 0);
	af_alg_init_completion(&ctx->completion);

	ask->private = ctx;

	skcipher_request_set_tfm(&ctx->req, private);
	skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      af_alg_complete, &ctx->completion);

	sk->sk_destruct = skcipher_sock_destruct;

	return 0;
}
/* Registration record for the "skcipher" algif socket type. */
static const struct af_alg_type algif_type_skcipher = {
	.bind		=	skcipher_bind,
	.release	=	skcipher_release,
	.setkey		=	skcipher_setkey,
	.accept		=	skcipher_accept_parent,
	.ops		=	&algif_skcipher_ops,
	.name		=	"skcipher",
	.owner		=	THIS_MODULE
};

/* Module init: register the "skcipher" type with the af_alg core. */
static int __init algif_skcipher_init(void)
{
	return af_alg_register_type(&algif_type_skcipher);
}

/* Module exit: unregistering our own type can only fail on a bug. */
static void __exit algif_skcipher_exit(void)
{
	int err = af_alg_unregister_type(&algif_type_skcipher);
	BUG_ON(err);
}

module_init(algif_skcipher_init);
module_exit(algif_skcipher_exit);

MODULE_LICENSE("GPL");
crossvul-cpp_data_bad_272_4 | 404: Not Found | ./CrossVul/dataset_final_sorted/CWE-476/c/bad_272_4 |
crossvul-cpp_data_good_528_3 | /* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
/*
* Terminal window support, see ":help :terminal".
*
* There are three parts:
* 1. Generic code for all systems.
* Uses libvterm for the terminal emulator.
* 2. The MS-Windows implementation.
* Uses winpty.
* 3. The Unix-like implementation.
* Uses pseudo-tty's (pty's).
*
* For each terminal one VTerm is constructed. This uses libvterm. A copy of
* this library is in the libvterm directory.
*
* When a terminal window is opened, a job is started that will be connected to
* the terminal emulator.
*
* If the terminal window has keyboard focus, typed keys are converted to the
* terminal encoding and writing to the job over a channel.
*
* If the job produces output, it is written to the terminal emulator. The
* terminal emulator invokes callbacks when its screen content changes. The
* line range is stored in tl_dirty_row_start and tl_dirty_row_end. Once in a
* while, if the terminal window is visible, the screen contents is drawn.
*
* When the job ends the text is put in a buffer. Redrawing then happens from
* that buffer, attributes come from the scrollback buffer tl_scrollback.
* When the buffer is changed it is turned into a normal buffer, the attributes
* in tl_scrollback are no longer used.
*/
#include "vim.h"
#if defined(FEAT_TERMINAL) || defined(PROTO)
#ifndef MIN
# define MIN(x,y) ((x) < (y) ? (x) : (y))
#endif
#ifndef MAX
# define MAX(x,y) ((x) > (y) ? (x) : (y))
#endif
#include "libvterm/include/vterm.h"
/* This is VTermScreenCell without the characters, thus much smaller. */
typedef struct {
VTermScreenCellAttrs attrs;
char width;
VTermColor fg;
VTermColor bg;
} cellattr_T;
typedef struct sb_line_S {
int sb_cols; /* can differ per line */
cellattr_T *sb_cells; /* allocated */
cellattr_T sb_fill_attr; /* for short line */
} sb_line_T;
/* typedef term_T in structs.h */
/*
 * State of one terminal window/buffer.  Allocated in term_start(), linked
 * into "first_term", freed by free_terminal().
 */
struct terminal_S {
    term_T	*tl_next;	    /* next entry in the first_term list */

    VTerm	*tl_vterm;	    /* the libvterm emulator instance */
    job_T	*tl_job;	    /* job connected to this terminal */
    buf_T	*tl_buffer;	    /* buffer displaying this terminal */
#if defined(FEAT_GUI)
    int		tl_system;	    /* when non-zero used for :!cmd output */
    int		tl_toprow;	    /* row with first line of system terminal */
#endif

    /* Set when setting the size of a vterm, reset after redrawing. */
    int		tl_vterm_size_changed;

    int		tl_normal_mode;	    /* TRUE: Terminal-Normal mode */
    int		tl_channel_closed;
    int		tl_channel_recently_closed; // still need to handle tl_finish

    int		tl_finish;	    /* what to do when the job ends: */
#define TL_FINISH_UNSET	    NUL
#define TL_FINISH_CLOSE	    'c'	/* ++close or :terminal without argument */
#define TL_FINISH_NOCLOSE   'n'	/* ++noclose */
#define TL_FINISH_OPEN	    'o'	/* ++open */
    char_u	*tl_opencmd;	    /* command used for TL_FINISH_OPEN */
    char_u	*tl_eof_chars;	    /* ++eof argument, sent on buffer EOF */

#ifdef WIN3264
    void	*tl_winpty_config;
    void	*tl_winpty;

    FILE	*tl_out_fd;	    /* redirect job output to this file */
#endif
#if defined(FEAT_SESSION)
    char_u	*tl_command;	    /* command for the session file */
#endif
    char_u	*tl_kill;	    /* signal name from ++kill */

    /* last known vterm size */
    int		tl_rows;
    int		tl_cols;

    char_u	*tl_title; /* NULL or allocated */
    char_u	*tl_status_text; /* NULL or allocated */

    /* Range of screen rows to update. Zero based. */
    int		tl_dirty_row_start; /* MAX_ROW if nothing dirty */
    int		tl_dirty_row_end;   /* row below last one to update */
    int		tl_dirty_snapshot;  /* text updated after making snapshot */
#ifdef FEAT_TIMERS
    int		tl_timer_set;
    proftime_T	tl_timer_due;
#endif
    int		tl_postponed_scroll;	/* to be scrolled up */

    garray_T	tl_scrollback;	    /* growarray of sb_line_T */
    int		tl_scrollback_scrolled;
    cellattr_T	tl_default_color;   /* attributes used for cleared cells */

    linenr_T	tl_top_diff_rows;   /* rows of top diff file or zero */
    linenr_T	tl_bot_diff_rows;   /* rows of bottom diff file */

    VTermPos	tl_cursor_pos;
    int		tl_cursor_visible;
    int		tl_cursor_blink;
    int		tl_cursor_shape;  /* 1: block, 2: underline, 3: bar */
    char_u	*tl_cursor_color; /* NULL or allocated */

    int		tl_using_altscreen;
};
#define TMODE_ONCE 1	    /* CTRL-\ CTRL-N used */
#define TMODE_LOOP 2	    /* CTRL-W N used */

/*
 * List of all active terminals.
 */
static term_T *first_term = NULL;

/* Terminal active in terminal_loop(). */
static term_T *in_terminal_loop = NULL;

#define MAX_ROW 999999	    /* used for tl_dirty_row_end to update all rows */
#define KEY_BUF_LEN 200

/*
 * Functions with separate implementation for MS-Windows and Unix-like systems.
 */
static int term_and_job_init(term_T *term, typval_T *argvar, char **argv, jobopt_T *opt, jobopt_T *orig_opt);
static int create_pty_only(term_T *term, jobopt_T *opt);
static void term_report_winsize(term_T *term, int rows, int cols);
static void term_free_vterm(term_T *term);
#ifdef FEAT_GUI
static void update_system_term(term_T *term);
#endif

/* The character that we know (or assume) that the terminal expects for the
 * backspace key. */
static int term_backspace_char = BS;

/* "Terminal" highlight group colors. */
static int term_default_cterm_fg = -1;
static int term_default_cterm_bg = -1;

/* Store the last set and the desired cursor properties, so that we only update
 * them when needed. Doing it unnecessary may result in flicker. */
static char_u	*last_set_cursor_color = NULL;
static char_u	*desired_cursor_color = NULL;
static int	last_set_cursor_shape = -1;
static int	desired_cursor_shape = -1;
static int	last_set_cursor_blink = -1;
static int	desired_cursor_blink = -1;
/**************************************
* 1. Generic code for all systems.
*/
/*
 * Return TRUE when the two cursor colors are equal: either both NULL or
 * both set to the same string.
 */
    static int
cursor_color_equal(char_u *lhs_color, char_u *rhs_color)
{
    if (lhs_color == NULL || rhs_color == NULL)
	/* Equal only when both are NULL. */
	return lhs_color == rhs_color;
    return STRCMP(lhs_color, rhs_color) == 0;
}
/*
 * Store a copy of "from_color" (which may be NULL) in "*to_color", freeing
 * the previous value.  Does nothing when the values already match.
 */
    static void
cursor_color_copy(char_u **to_color, char_u *from_color)
{
    /* Avoid a free & alloc if the value is already right. */
    if (!cursor_color_equal(*to_color, from_color))
    {
	vim_free(*to_color);
	if (from_color == NULL)
	    *to_color = NULL;
	else
	    *to_color = vim_strsave(from_color);
    }
}
/*
 * Return "color", or an empty string when "color" is NULL, so callers never
 * have to deal with a NULL cursor color.
 */
    static char_u *
cursor_color_get(char_u *color)
{
    if (color == NULL)
	return (char_u *)"";
    return color;
}
/*
* Parse 'termwinsize' and set "rows" and "cols" for the terminal size in the
* current window.
* Sets "rows" and/or "cols" to zero when it should follow the window size.
* Return TRUE if the size is the minimum size: "24*80".
*/
    static int
parse_termwinsize(win_T *wp, int *rows, int *cols)
{
    int	minsize = FALSE;

    *rows = 0;
    *cols = 0;

    if (*wp->w_p_tws != NUL)
    {
	char_u *p = vim_strchr(wp->w_p_tws, 'x');

	/* Syntax of value was already checked when it's set. */
	if (p == NULL)
	{
	    minsize = TRUE;
	    /* NOTE: option validation guarantees '*' is present when 'x'
	     * is not, so "p" cannot be NULL below. */
	    p = vim_strchr(wp->w_p_tws, '*');
	}
	*rows = atoi((char *)wp->w_p_tws);
	*cols = atoi((char *)p + 1);
    }
    return minsize;
}
/*
* Determine the terminal size from 'termwinsize' and the current window.
*/
/*
 * Set term->tl_rows/tl_cols from 'termwinsize' and the current window, and
 * resize the window when a fixed size was requested.  For a system terminal
 * the whole screen is used.
 */
    static void
set_term_and_win_size(term_T *term)
{
#ifdef FEAT_GUI
    if (term->tl_system)
    {
	/* Use the whole screen for the system command.  However, it will start
	 * at the command line and scroll up as needed, using tl_toprow. */
	term->tl_rows = Rows;
	term->tl_cols = Columns;
	return;
    }
#endif
    if (parse_termwinsize(curwin, &term->tl_rows, &term->tl_cols))
    {
	/* "24*80" style: the values are a minimum, grow to the window. */
	if (term->tl_rows != 0)
	    term->tl_rows = MAX(term->tl_rows, curwin->w_height);
	if (term->tl_cols != 0)
	    term->tl_cols = MAX(term->tl_cols, curwin->w_width);
    }

    /* Zero means: follow the window size; otherwise resize the window to
     * the requested terminal size. */
    if (term->tl_rows == 0)
	term->tl_rows = curwin->w_height;
    else
	win_setheight_win(term->tl_rows, curwin);
    if (term->tl_cols == 0)
	term->tl_cols = curwin->w_width;
    else
	win_setwidth_win(term->tl_cols, curwin);
}
/*
* Initialize job options for a terminal job.
* Caller may overrule some of them.
*/
    void
init_job_options(jobopt_T *opt)
{
    clear_job_options(opt);

    /* A terminal job always uses raw I/O on all channels. */
    opt->jo_mode = MODE_RAW;
    opt->jo_out_mode = MODE_RAW;
    opt->jo_err_mode = MODE_RAW;
    opt->jo_set = JO_MODE | JO_OUT_MODE | JO_ERR_MODE;
}
/*
* Set job options mandatory for a terminal job.
*/
    static void
setup_job_options(jobopt_T *opt, int rows, int cols)
{
#ifndef WIN3264
    /* Win32: Redirecting the job output won't work, thus always connect stdout
     * here. */
    if (!(opt->jo_set & JO_OUT_IO))
#endif
    {
	/* Connect stdout to the terminal. */
	opt->jo_io[PART_OUT] = JIO_BUFFER;
	opt->jo_io_buf[PART_OUT] = curbuf->b_fnum;
	opt->jo_modifiable[PART_OUT] = 0;
	opt->jo_set |= JO_OUT_IO + JO_OUT_BUF + JO_OUT_MODIFIABLE;
    }

#ifndef WIN3264
    /* Win32: Redirecting the job output won't work, thus always connect stderr
     * here. */
    if (!(opt->jo_set & JO_ERR_IO))
#endif
    {
	/* Connect stderr to the terminal. */
	opt->jo_io[PART_ERR] = JIO_BUFFER;
	opt->jo_io_buf[PART_ERR] = curbuf->b_fnum;
	opt->jo_modifiable[PART_ERR] = 0;
	opt->jo_set |= JO_ERR_IO + JO_ERR_BUF + JO_ERR_MODIFIABLE;
    }

    /* The job runs on a pseudo-tty of the given size. */
    opt->jo_pty = TRUE;
    if ((opt->jo_set2 & JO2_TERM_ROWS) == 0)
	opt->jo_term_rows = rows;
    if ((opt->jo_set2 & JO2_TERM_COLS) == 0)
	opt->jo_term_cols = cols;
}
/*
* Close a terminal buffer (and its window). Used when creating the terminal
* fails.
*/
    static void
term_close_buffer(buf_T *buf, buf_T *old_curbuf)
{
    free_terminal(buf);
    if (old_curbuf != NULL)
    {
	/* Restore the buffer that was current before term_start() switched
	 * to the (hidden) terminal buffer. */
	--curbuf->b_nwindows;
	curbuf = old_curbuf;
	curwin->w_buffer = curbuf;
	++curbuf->b_nwindows;
    }

    /* Wiping out the buffer will also close the window and call
     * free_terminal(). */
    do_buffer(DOBUF_WIPE, DOBUF_FIRST, FORWARD, buf->b_fnum, TRUE);
}
/*
* Start a terminal window and return its buffer.
* Use either "argvar" or "argv", the other must be NULL.
* When "flags" has TERM_START_NOJOB only create the buffer, b_term and open
* the window.
* Returns NULL when failed.
*/
    buf_T *
term_start(
	typval_T    *argvar,
	char	    **argv,
	jobopt_T    *opt,
	int	    flags)
{
    exarg_T	split_ea;
    win_T	*old_curwin = curwin;
    term_T	*term;
    buf_T	*old_curbuf = NULL;
    int		res;
    buf_T	*newbuf;
    int		vertical = opt->jo_vertical || (cmdmod.split & WSP_VERT);
    jobopt_T	orig_opt;  // only partly filled

    if (check_restricted() || check_secure())
	return NULL;

    /* Refuse inconsistent I/O redirections: all three parts to the terminal
     * at once, or a buffer without the matching I/O mode. */
    if ((opt->jo_set & (JO_IN_IO + JO_OUT_IO + JO_ERR_IO))
					 == (JO_IN_IO + JO_OUT_IO + JO_ERR_IO)
	|| (!(opt->jo_set & JO_OUT_IO) && (opt->jo_set & JO_OUT_BUF))
	|| (!(opt->jo_set & JO_ERR_IO) && (opt->jo_set & JO_ERR_BUF)))
    {
	EMSG(_(e_invarg));
	return NULL;
    }

    term = (term_T *)alloc_clear(sizeof(term_T));
    if (term == NULL)
	return NULL;
    term->tl_dirty_row_end = MAX_ROW;
    term->tl_cursor_visible = TRUE;
    term->tl_cursor_shape = VTERM_PROP_CURSORSHAPE_BLOCK;
    term->tl_finish = opt->jo_term_finish;
#ifdef FEAT_GUI
    term->tl_system = (flags & TERM_START_SYSTEM);
#endif
    ga_init2(&term->tl_scrollback, sizeof(sb_line_T), 300);

    vim_memset(&split_ea, 0, sizeof(split_ea));
    if (opt->jo_curwin)
    {
	/* Create a new buffer in the current window. */
	if (!can_abandon(curbuf, flags & TERM_START_FORCEIT))
	{
	    no_write_message();
	    vim_free(term);
	    return NULL;
	}
	if (do_ecmd(0, NULL, NULL, &split_ea, ECMD_ONE,
		     ECMD_HIDE
			 + ((flags & TERM_START_FORCEIT) ? ECMD_FORCEIT : 0),
		     curwin) == FAIL)
	{
	    vim_free(term);
	    return NULL;
	}
    }
    else if (opt->jo_hidden || (flags & TERM_START_SYSTEM))
    {
	buf_T *buf;

	/* Create a new buffer without a window. Make it the current buffer for
	 * a moment to be able to do the initialisations. */
	buf = buflist_new((char_u *)"", NULL, (linenr_T)0,
							 BLN_NEW | BLN_LISTED);
	if (buf == NULL || ml_open(buf) == FAIL)
	{
	    vim_free(term);
	    return NULL;
	}
	old_curbuf = curbuf;
	--curbuf->b_nwindows;
	curbuf = buf;
	curwin->w_buffer = buf;
	++curbuf->b_nwindows;
    }
    else
    {
	/* Open a new window or tab. */
	split_ea.cmdidx = CMD_new;
	split_ea.cmd = (char_u *)"new";
	split_ea.arg = (char_u *)"";
	if (opt->jo_term_rows > 0 && !vertical)
	{
	    split_ea.line2 = opt->jo_term_rows;
	    split_ea.addr_count = 1;
	}
	if (opt->jo_term_cols > 0 && vertical)
	{
	    split_ea.line2 = opt->jo_term_cols;
	    split_ea.addr_count = 1;
	}

	if (vertical)
	    cmdmod.split |= WSP_VERT;
	ex_splitview(&split_ea);
	if (curwin == old_curwin)
	{
	    /* split failed */
	    vim_free(term);
	    return NULL;
	}
    }
    term->tl_buffer = curbuf;
    curbuf->b_term = term;

    if (!opt->jo_hidden)
    {
	/* Only one size was taken care of with :new, do the other one.  With
	 * "curwin" both need to be done. */
	if (opt->jo_term_rows > 0 && (opt->jo_curwin || vertical))
	    win_setheight(opt->jo_term_rows);
	if (opt->jo_term_cols > 0 && (opt->jo_curwin || !vertical))
	    win_setwidth(opt->jo_term_cols);
    }

    /* Link the new terminal in the list of active terminals. */
    term->tl_next = first_term;
    first_term = term;

    /* Pick a unique buffer name: the given name, "!system", or "!<cmd>" with
     * an optional " (N)" suffix to avoid clashes. */
    if (opt->jo_term_name != NULL)
	curbuf->b_ffname = vim_strsave(opt->jo_term_name);
    else if (argv != NULL)
	curbuf->b_ffname = vim_strsave((char_u *)"!system");
    else
    {
	int	i;
	size_t	len;
	char_u	*cmd, *p;

	if (argvar->v_type == VAR_STRING)
	{
	    cmd = argvar->vval.v_string;
	    if (cmd == NULL)
		cmd = (char_u *)"";
	    else if (STRCMP(cmd, "NONE") == 0)
		cmd = (char_u *)"pty";
	}
	else if (argvar->v_type != VAR_LIST
		|| argvar->vval.v_list == NULL
		|| argvar->vval.v_list->lv_len < 1
		|| (cmd = tv_get_string_chk(
			       &argvar->vval.v_list->lv_first->li_tv)) == NULL)
	    cmd = (char_u*)"";

	len = STRLEN(cmd) + 10;
	p = alloc((int)len);

	for (i = 0; p != NULL; ++i)
	{
	    /* Prepend a ! to the command name to avoid the buffer name equals
	     * the executable, otherwise ":w!" would overwrite it. */
	    if (i == 0)
		vim_snprintf((char *)p, len, "!%s", cmd);
	    else
		vim_snprintf((char *)p, len, "!%s (%d)", cmd, i);
	    if (buflist_findname(p) == NULL)
	    {
		vim_free(curbuf->b_ffname);
		curbuf->b_ffname = p;
		break;
	    }
	}
    }
    curbuf->b_fname = curbuf->b_ffname;

    if (opt->jo_term_opencmd != NULL)
	term->tl_opencmd = vim_strsave(opt->jo_term_opencmd);

    if (opt->jo_eof_chars != NULL)
	term->tl_eof_chars = vim_strsave(opt->jo_eof_chars);

    set_string_option_direct((char_u *)"buftype", -1,
				  (char_u *)"terminal", OPT_FREE|OPT_LOCAL, 0);
    // Avoid that 'buftype' is reset when this buffer is entered.
    curbuf->b_p_initialized = TRUE;

    /* Mark the buffer as not modifiable. It can only be made modifiable after
     * the job finished. */
    curbuf->b_p_ma = FALSE;

    set_term_and_win_size(term);
#ifdef WIN3264
    mch_memmove(orig_opt.jo_io, opt->jo_io, sizeof(orig_opt.jo_io));
#endif
    setup_job_options(opt, term->tl_rows, term->tl_cols);

    if (flags & TERM_START_NOJOB)
	return curbuf;

#if defined(FEAT_SESSION)
    /* Remember the command for the session file. */
    if (opt->jo_term_norestore || argv != NULL)
    {
	term->tl_command = vim_strsave((char_u *)"NONE");
    }
    else if (argvar->v_type == VAR_STRING)
    {
	char_u	*cmd = argvar->vval.v_string;

	if (cmd != NULL && STRCMP(cmd, p_sh) != 0)
	    term->tl_command = vim_strsave(cmd);
    }
    else if (argvar->v_type == VAR_LIST
	    && argvar->vval.v_list != NULL
	    && argvar->vval.v_list->lv_len > 0)
    {
	garray_T	ga;
	listitem_T	*item;

	ga_init2(&ga, 1, 100);
	for (item = argvar->vval.v_list->lv_first;
			item != NULL; item = item->li_next)
	{
	    char_u *s = tv_get_string_chk(&item->li_tv);
	    char_u *p;

	    if (s == NULL)
		break;
	    p = vim_strsave_fnameescape(s, FALSE);
	    if (p == NULL)
		break;
	    ga_concat(&ga, p);
	    vim_free(p);
	    ga_append(&ga, ' ');
	}
	if (item == NULL)
	{
	    ga_append(&ga, NUL);
	    term->tl_command = ga.ga_data;
	}
	else
	    ga_clear(&ga);
    }
#endif

    if (opt->jo_term_kill != NULL)
    {
	char_u *p = skiptowhite(opt->jo_term_kill);

	term->tl_kill = vim_strnsave(opt->jo_term_kill, p - opt->jo_term_kill);
    }

    /* System dependent: setup the vterm and maybe start the job in it. */
    if (argv == NULL
	    && argvar->v_type == VAR_STRING
	    && argvar->vval.v_string != NULL
	    && STRCMP(argvar->vval.v_string, "NONE") == 0)
	res = create_pty_only(term, opt);
    else
	res = term_and_job_init(term, argvar, argv, opt, &orig_opt);

    newbuf = curbuf;
    if (res == OK)
    {
	/* Get and remember the size we ended up with.  Update the pty. */
	vterm_get_size(term->tl_vterm, &term->tl_rows, &term->tl_cols);
	term_report_winsize(term, term->tl_rows, term->tl_cols);
#ifdef FEAT_GUI
	if (term->tl_system)
	{
	    /* display first line below typed command */
	    term->tl_toprow = msg_row + 1;
	    term->tl_dirty_row_end = 0;
	}
#endif

	/* Make sure we don't get stuck on sending keys to the job, it leads to
	 * a deadlock if the job is waiting for Vim to read. */
	channel_set_nonblock(term->tl_job->jv_channel, PART_IN);

	if (old_curbuf != NULL)
	{
	    --curbuf->b_nwindows;
	    curbuf = old_curbuf;
	    curwin->w_buffer = curbuf;
	    ++curbuf->b_nwindows;
	}
    }
    else
    {
	term_close_buffer(curbuf, old_curbuf);
	return NULL;
    }

    apply_autocmds(EVENT_TERMINALOPEN, NULL, NULL, FALSE, newbuf);
    return newbuf;
}
/*
* ":terminal": open a terminal window and execute a job in it.
*/
    void
ex_terminal(exarg_T *eap)
{
    typval_T	argvar[2];
    jobopt_T	opt;
    char_u	*cmd;
    char_u	*tofree = NULL;

    init_job_options(&opt);

    /* Parse the leading "++name" and "++name=value" attributes. */
    cmd = eap->arg;
    while (*cmd == '+' && *(cmd + 1) == '+')
    {
	char_u  *p, *ep;

	cmd += 2;
	p = skiptowhite(cmd);
	ep = vim_strchr(cmd, '=');
	if (ep != NULL && ep < p)
	    p = ep;

	if ((int)(p - cmd) == 5 && STRNICMP(cmd, "close", 5) == 0)
	    opt.jo_term_finish = 'c';
	else if ((int)(p - cmd) == 7 && STRNICMP(cmd, "noclose", 7) == 0)
	    opt.jo_term_finish = 'n';
	else if ((int)(p - cmd) == 4 && STRNICMP(cmd, "open", 4) == 0)
	    opt.jo_term_finish = 'o';
	else if ((int)(p - cmd) == 6 && STRNICMP(cmd, "curwin", 6) == 0)
	    opt.jo_curwin = 1;
	else if ((int)(p - cmd) == 6 && STRNICMP(cmd, "hidden", 6) == 0)
	    opt.jo_hidden = 1;
	else if ((int)(p - cmd) == 9 && STRNICMP(cmd, "norestore", 9) == 0)
	    opt.jo_term_norestore = 1;
	else if ((int)(p - cmd) == 4 && STRNICMP(cmd, "kill", 4) == 0
		&& ep != NULL)
	{
	    opt.jo_set2 |= JO2_TERM_KILL;
	    opt.jo_term_kill = ep + 1;
	    p = skiptowhite(cmd);
	}
	else if ((int)(p - cmd) == 4 && STRNICMP(cmd, "rows", 4) == 0
		&& ep != NULL && isdigit(ep[1]))
	{
	    opt.jo_set2 |= JO2_TERM_ROWS;
	    opt.jo_term_rows = atoi((char *)ep + 1);
	    p = skiptowhite(cmd);
	}
	else if ((int)(p - cmd) == 4 && STRNICMP(cmd, "cols", 4) == 0
		&& ep != NULL && isdigit(ep[1]))
	{
	    opt.jo_set2 |= JO2_TERM_COLS;
	    opt.jo_term_cols = atoi((char *)ep + 1);
	    p = skiptowhite(cmd);
	}
	else if ((int)(p - cmd) == 3 && STRNICMP(cmd, "eof", 3) == 0
								 && ep != NULL)
	{
	    char_u *buf = NULL;
	    char_u *keys;

	    /* Temporarily terminate the argument to translate the key
	     * codes in the ++eof value. */
	    p = skiptowhite(cmd);
	    *p = NUL;
	    keys = replace_termcodes(ep + 1, &buf, TRUE, TRUE, TRUE);
	    opt.jo_set2 |= JO2_EOF_CHARS;
	    opt.jo_eof_chars = vim_strsave(keys);
	    vim_free(buf);
	    /* NOTE(review): when "++eof=..." is the last argument "*p" was
	     * the terminating NUL; writing a space here relies on the byte
	     * after it being NUL-ish for skipwhite() below - confirm. */
	    *p = ' ';
	}
	else
	{
	    if (*p)
		*p = NUL;
	    EMSG2(_("E181: Invalid attribute: %s"), cmd);
	    goto theend;
	}
	cmd = skipwhite(p);
    }
    if (*cmd == NUL)
    {
	/* Make a copy of 'shell', an autocommand may change the option. */
	tofree = cmd = vim_strsave(p_sh);

	/* default to close when the shell exits */
	if (opt.jo_term_finish == NUL)
	    opt.jo_term_finish = 'c';
    }

    if (eap->addr_count > 0)
    {
	/* Write lines from current buffer to the job. */
	opt.jo_set |= JO_IN_IO | JO_IN_BUF | JO_IN_TOP | JO_IN_BOT;
	opt.jo_io[PART_IN] = JIO_BUFFER;
	opt.jo_io_buf[PART_IN] = curbuf->b_fnum;
	opt.jo_in_top = eap->line1;
	opt.jo_in_bot = eap->line2;
    }

    argvar[0].v_type = VAR_STRING;
    argvar[0].vval.v_string = cmd;
    argvar[1].v_type = VAR_UNKNOWN;
    term_start(argvar, NULL, &opt, eap->forceit ? TERM_START_FORCEIT : 0);
    vim_free(tofree);

theend:
    vim_free(opt.jo_eof_chars);
}
#if defined(FEAT_SESSION) || defined(PROTO)
/*
* Write a :terminal command to the session file to restore the terminal in
* window "wp".
* Return FAIL if writing fails.
*/
    int
term_write_session(FILE *fd, win_T *wp)
{
    term_T *term = wp->w_buffer->b_term;

    /* Create the terminal and run the command.  This is not without
     * risk, but let's assume the user only creates a session when this
     * will be OK. */
    if (fprintf(fd, "terminal ++curwin ++cols=%d ++rows=%d ",
		term->tl_cols, term->tl_rows) < 0)
	return FAIL;
    /* tl_command is NULL when the terminal only ran the default shell. */
    if (term->tl_command != NULL && fputs((char *)term->tl_command, fd) < 0)
	return FAIL;

    return put_eol(fd);
}
/*
* Return TRUE if "buf" has a terminal that should be restored.
*/
/*
 * Return TRUE if "buf" has a terminal that should be restored: it has a
 * terminal and the remembered command is not "NONE".
 */
    int
term_should_restore(buf_T *buf)
{
    term_T	*term = buf->b_term;

    if (term == NULL)
	return FALSE;
    return term->tl_command == NULL
				|| STRCMP(term->tl_command, "NONE") != 0;
}
#endif
/*
* Free the scrollback buffer for "term".
*/
/*
 * Free the scrollback buffer for "term": every line's cell array and then
 * the growarray itself.
 */
    static void
free_scrollback(term_T *term)
{
    sb_line_T	*lines = (sb_line_T *)term->tl_scrollback.ga_data;
    int		idx;

    for (idx = 0; idx < term->tl_scrollback.ga_len; ++idx)
	vim_free(lines[idx].sb_cells);
    ga_clear(&term->tl_scrollback);
}
/*
* Free a terminal and everything it refers to.
* Kills the job if there is one.
* Called when wiping out a buffer.
*/
    void
free_terminal(buf_T *buf)
{
    term_T	*term = buf->b_term;
    term_T	*tp;

    if (term == NULL)
	return;

    /* Unlink "term" from the first_term list. */
    if (first_term == term)
	first_term = term->tl_next;
    else
	for (tp = first_term; tp->tl_next != NULL; tp = tp->tl_next)
	    if (tp->tl_next == term)
	    {
		tp->tl_next = term->tl_next;
		break;
	    }

    /* Kill a still-running job before dropping our reference to it. */
    if (term->tl_job != NULL)
    {
	if (term->tl_job->jv_status != JOB_ENDED
		&& term->tl_job->jv_status != JOB_FINISHED
		&& term->tl_job->jv_status != JOB_FAILED)
	    job_stop(term->tl_job, NULL, "kill");
	job_unref(term->tl_job);
    }

    free_scrollback(term);

    term_free_vterm(term);
    vim_free(term->tl_title);
#ifdef FEAT_SESSION
    vim_free(term->tl_command);
#endif
    vim_free(term->tl_kill);
    vim_free(term->tl_status_text);
    vim_free(term->tl_opencmd);
    vim_free(term->tl_eof_chars);
#ifdef WIN3264
    if (term->tl_out_fd != NULL)
	fclose(term->tl_out_fd);
#endif
    vim_free(term->tl_cursor_color);
    vim_free(term);
    buf->b_term = NULL;
    if (in_terminal_loop == term)
	in_terminal_loop = NULL;
}
/*
* Get the part that is connected to the tty. Normally this is PART_IN, but
* when writing buffer lines to the job it can be another. This makes it
* possible to do "1,5term vim -".
*/
/*
 * Get the part that is connected to the tty.  Normally this is PART_IN, but
 * when writing buffer lines to the job it can be another.  This makes it
 * possible to do "1,5term vim -".
 */
    static ch_part_T
get_tty_part(term_T *term)
{
#ifdef UNIX
    static const ch_part_T candidates[] = {PART_IN, PART_OUT, PART_ERR};
    size_t		   idx;

    for (idx = 0; idx < sizeof(candidates) / sizeof(candidates[0]); ++idx)
    {
	channel_T *channel = term->tl_job->jv_channel;

	if (isatty(channel->ch_part[candidates[idx]].ch_fd))
	    return candidates[idx];
    }
#endif
    return PART_IN;
}
/*
* Write job output "msg[len]" to the vterm.
*/
    static void
term_write_job_output(term_T *term, char_u *msg, size_t len)
{
    VTerm	*vterm = term->tl_vterm;
    size_t	prevlen = vterm_output_get_buffer_current(vterm);

    vterm_input_write(vterm, (char *)msg, len);

    /* flush vterm buffer when vterm responded to control sequence */
    if (prevlen != vterm_output_get_buffer_current(vterm))
    {
	char   buf[KEY_BUF_LEN];
	size_t curlen = vterm_output_read(vterm, buf, KEY_BUF_LEN);

	/* Send the terminal's answer back to the job. */
	if (curlen > 0)
	    channel_send(term->tl_job->jv_channel, get_tty_part(term),
						(char_u *)buf, (int)curlen, NULL);
    }

    /* this invokes the damage callbacks */
    vterm_screen_flush_damage(vterm_obtain_screen(vterm));
}
/*
 * Position the screen cursor on the terminal cursor and optionally flush the
 * output.  Does nothing in Terminal-Normal mode, where the regular buffer
 * cursor is shown instead.
 */
    static void
update_cursor(term_T *term, int redraw)
{
    if (term->tl_normal_mode)
	return;
#ifdef FEAT_GUI
    if (term->tl_system)
	windgoto(term->tl_cursor_pos.row + term->tl_toprow,
						      term->tl_cursor_pos.col);
    else
#endif
	setcursor();
    if (redraw)
    {
	if (term->tl_buffer == curbuf && term->tl_cursor_visible)
	    cursor_on();
	out_flush();
#ifdef FEAT_GUI
	if (gui.in_use)
	{
	    gui_update_cursor(FALSE, FALSE);
	    gui_mch_flush();
	}
#endif
    }
}
/*
* Invoked when "msg" output from a job was received. Write it to the terminal
* of "buffer".
*/
    void
write_to_term(buf_T *buffer, char_u *msg, channel_T *channel)
{
    size_t	len = STRLEN(msg);
    term_T	*term = buffer->b_term;

#ifdef WIN3264
    /* Win32: Cannot redirect output of the job, intercept it here and write to
     * the file. */
    if (term->tl_out_fd != NULL)
    {
	ch_log(channel, "Writing %d bytes to output file", (int)len);
	fwrite(msg, len, 1, term->tl_out_fd);
	return;
    }
#endif

    /* The vterm is gone once the job ended; drop the output. */
    if (term->tl_vterm == NULL)
    {
	ch_log(channel, "NOT writing %d bytes to terminal", (int)len);
	return;
    }
    ch_log(channel, "writing %d bytes to terminal", (int)len);
    term_write_job_output(term, msg, len);

#ifdef FEAT_GUI
    if (term->tl_system)
    {
	/* show system output, scrolling up the screen as needed */
	update_system_term(term);
	update_cursor(term, TRUE);
    }
    else
#endif
    /* In Terminal-Normal mode we are displaying the buffer, not the terminal
     * contents, thus no screen update is needed. */
    if (!term->tl_normal_mode)
    {
	// Don't use update_screen() when editing the command line, it gets
	// cleared.
	// TODO: only update once in a while.
	ch_log(term->tl_job->jv_channel, "updating screen");
	if (buffer == curbuf && (State & CMDLINE) == 0)
	{
	    update_screen(VALID_NO_UPDATE);
	    /* update_screen() can be slow, check the terminal wasn't closed
	     * already */
	    if (buffer == curbuf && curbuf->b_term != NULL)
		update_cursor(curbuf->b_term, TRUE);
	}
	else
	    redraw_after_callback(TRUE);
    }
}
/*
 * Forward the current mouse position to the vterm, and when "button" is
 * non-zero also a press/release event for that button.
 * Always returns TRUE.
 */
    static int
term_send_mouse(VTerm *vterm, int button, int pressed)
{
    VTermModifier   modifiers = VTERM_MOD_NONE;
    int		    row = mouse_row - W_WINROW(curwin);
    int		    col = mouse_col - curwin->w_wincol;

    vterm_mouse_move(vterm, row, col, modifiers);
    if (button != 0)
	vterm_mouse_button(vterm, button, pressed, modifiers);
    return TRUE;
}
/* Mouse position when the mouse entered a terminal window; -1 when unset.
 * Used to replay the focus-giving click for modeless selection. */
static int enter_mouse_col = -1;
static int enter_mouse_row = -1;
/*
 * Handle a mouse click, drag or release.
 * Return TRUE when a mouse event is sent to the terminal.
 * Returns FALSE when the terminal is not using the mouse; the event is then
 * used for modeless selection (when the clipboard feature is available).
 */
    static int
term_mouse_click(VTerm *vterm, int key)
{
#if defined(FEAT_CLIPBOARD)
    /* For modeless selection mouse drag and release events are ignored, unless
     * they are preceded with a mouse down event */
    static int	    ignore_drag_release = TRUE;
    VTermMouseState mouse_state;

    vterm_state_get_mousestate(vterm_obtain_state(vterm), &mouse_state);
    if (mouse_state.flags == 0)
    {
	/* Terminal is not using the mouse, use modeless selection. */
	switch (key)
	{
	case K_LEFTDRAG:
	case K_LEFTRELEASE:
	case K_RIGHTDRAG:
	case K_RIGHTRELEASE:
		/* Ignore drag and release events when the button-down wasn't
		 * seen before. */
		if (ignore_drag_release)
		{
		    int save_mouse_col, save_mouse_row;

		    if (enter_mouse_col < 0)
			break;

		    /* mouse click in the window gave us focus, handle that
		     * click now */
		    save_mouse_col = mouse_col;
		    save_mouse_row = mouse_row;
		    mouse_col = enter_mouse_col;
		    mouse_row = enter_mouse_row;
		    clip_modeless(MOUSE_LEFT, TRUE, FALSE);
		    mouse_col = save_mouse_col;
		    mouse_row = save_mouse_row;
		}
		/* FALLTHROUGH */
	case K_LEFTMOUSE:
	case K_RIGHTMOUSE:
		if (key == K_LEFTRELEASE || key == K_RIGHTRELEASE)
		    ignore_drag_release = TRUE;
		else
		    ignore_drag_release = FALSE;
		/* Should we call mouse_has() here? */
		if (clip_star.available)
		{
		    int	    button, is_click, is_drag;

		    button = get_mouse_button(KEY2TERMCAP1(key),
							 &is_click, &is_drag);
		    if (mouse_model_popup() && button == MOUSE_LEFT
					       && (mod_mask & MOD_MASK_SHIFT))
		    {
			/* Translate shift-left to right button. */
			button = MOUSE_RIGHT;
			mod_mask &= ~MOD_MASK_SHIFT;
		    }
		    clip_modeless(button, is_click, is_drag);
		}
		break;

	case K_MIDDLEMOUSE:
		/* Middle click pastes the modeless selection. */
		if (clip_star.available)
		    insert_reg('*', TRUE);
		break;
	}
	enter_mouse_col = -1;
	return FALSE;
    }
#endif
    enter_mouse_col = -1;

    /* The terminal wants mouse events: translate the key to a vterm event. */
    switch (key)
    {
	case K_LEFTMOUSE:
	case K_LEFTMOUSE_NM:	term_send_mouse(vterm, 1, 1); break;
	case K_LEFTDRAG:	term_send_mouse(vterm, 1, 1); break;
	case K_LEFTRELEASE:
	case K_LEFTRELEASE_NM:	term_send_mouse(vterm, 1, 0); break;
	case K_MOUSEMOVE:	term_send_mouse(vterm, 0, 0); break;
	case K_MIDDLEMOUSE:	term_send_mouse(vterm, 2, 1); break;
	case K_MIDDLEDRAG:	term_send_mouse(vterm, 2, 1); break;
	case K_MIDDLERELEASE:	term_send_mouse(vterm, 2, 0); break;
	case K_RIGHTMOUSE:	term_send_mouse(vterm, 3, 1); break;
	case K_RIGHTDRAG:	term_send_mouse(vterm, 3, 1); break;
	case K_RIGHTRELEASE:	term_send_mouse(vterm, 3, 0); break;
    }
    return TRUE;
}
/*
 * Convert typed key "c" into bytes to send to the job.
 * Return the number of bytes in "buf" (zero when the key is to be ignored).
 * "buf" must be at least KEY_BUF_LEN bytes.
 */
    static int
term_convert_key(term_T *term, int c, char *buf)
{
    VTerm	    *vterm = term->tl_vterm;
    VTermKey	    key = VTERM_KEY_NONE;
    VTermModifier   mod = VTERM_MOD_NONE;
    /* "other" is set when the key was already handled (mouse, paste
     * start/end), so no character should be written below. */
    int		    other = FALSE;

    switch (c)
    {
	/* don't use VTERM_KEY_ENTER, it may do an unwanted conversion */

	/* don't use VTERM_KEY_BACKSPACE, it always
	 * becomes 0x7f DEL */
	case K_BS:		c = term_backspace_char; break;

	case ESC:		key = VTERM_KEY_ESCAPE; break;
	case K_DEL:		key = VTERM_KEY_DEL; break;
	case K_DOWN:		key = VTERM_KEY_DOWN; break;
	case K_S_DOWN:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_DOWN; break;
	case K_END:		key = VTERM_KEY_END; break;
	case K_S_END:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_END; break;
	case K_C_END:		mod = VTERM_MOD_CTRL;
				key = VTERM_KEY_END; break;
	case K_F10:		key = VTERM_KEY_FUNCTION(10); break;
	case K_F11:		key = VTERM_KEY_FUNCTION(11); break;
	case K_F12:		key = VTERM_KEY_FUNCTION(12); break;
	case K_F1:		key = VTERM_KEY_FUNCTION(1); break;
	case K_F2:		key = VTERM_KEY_FUNCTION(2); break;
	case K_F3:		key = VTERM_KEY_FUNCTION(3); break;
	case K_F4:		key = VTERM_KEY_FUNCTION(4); break;
	case K_F5:		key = VTERM_KEY_FUNCTION(5); break;
	case K_F6:		key = VTERM_KEY_FUNCTION(6); break;
	case K_F7:		key = VTERM_KEY_FUNCTION(7); break;
	case K_F8:		key = VTERM_KEY_FUNCTION(8); break;
	case K_F9:		key = VTERM_KEY_FUNCTION(9); break;
	case K_HOME:		key = VTERM_KEY_HOME; break;
	case K_S_HOME:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_HOME; break;
	case K_C_HOME:		mod = VTERM_MOD_CTRL;
				key = VTERM_KEY_HOME; break;
	case K_INS:		key = VTERM_KEY_INS; break;
	case K_K0:		key = VTERM_KEY_KP_0; break;
	case K_K1:		key = VTERM_KEY_KP_1; break;
	case K_K2:		key = VTERM_KEY_KP_2; break;
	case K_K3:		key = VTERM_KEY_KP_3; break;
	case K_K4:		key = VTERM_KEY_KP_4; break;
	case K_K5:		key = VTERM_KEY_KP_5; break;
	case K_K6:		key = VTERM_KEY_KP_6; break;
	case K_K7:		key = VTERM_KEY_KP_7; break;
	case K_K8:		key = VTERM_KEY_KP_8; break;
	case K_K9:		key = VTERM_KEY_KP_9; break;
	case K_KDEL:		key = VTERM_KEY_DEL; break; /* TODO */
	case K_KDIVIDE:		key = VTERM_KEY_KP_DIVIDE; break;
	case K_KEND:		key = VTERM_KEY_KP_1; break; /* TODO */
	case K_KENTER:		key = VTERM_KEY_KP_ENTER; break;
	case K_KHOME:		key = VTERM_KEY_KP_7; break; /* TODO */
	case K_KINS:		key = VTERM_KEY_KP_0; break; /* TODO */
	case K_KMINUS:		key = VTERM_KEY_KP_MINUS; break;
	case K_KMULTIPLY:	key = VTERM_KEY_KP_MULT; break;
	case K_KPAGEDOWN:	key = VTERM_KEY_KP_3; break; /* TODO */
	case K_KPAGEUP:		key = VTERM_KEY_KP_9; break; /* TODO */
	case K_KPLUS:		key = VTERM_KEY_KP_PLUS; break;
	case K_KPOINT:		key = VTERM_KEY_KP_PERIOD; break;
	case K_LEFT:		key = VTERM_KEY_LEFT; break;
	case K_S_LEFT:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_LEFT; break;
	case K_C_LEFT:		mod = VTERM_MOD_CTRL;
				key = VTERM_KEY_LEFT; break;
	case K_PAGEDOWN:	key = VTERM_KEY_PAGEDOWN; break;
	case K_PAGEUP:		key = VTERM_KEY_PAGEUP; break;
	case K_RIGHT:		key = VTERM_KEY_RIGHT; break;
	case K_S_RIGHT:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_RIGHT; break;
	case K_C_RIGHT:		mod = VTERM_MOD_CTRL;
				key = VTERM_KEY_RIGHT; break;
	case K_UP:		key = VTERM_KEY_UP; break;
	case K_S_UP:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_UP; break;
	case TAB:		key = VTERM_KEY_TAB; break;
	case K_S_TAB:		mod = VTERM_MOD_SHIFT;
				key = VTERM_KEY_TAB; break;

	/* Mouse events are handled by term_send_mouse()/term_mouse_click();
	 * they set "other" so nothing more is written below. */
	case K_MOUSEUP:		other = term_send_mouse(vterm, 5, 1); break;
	case K_MOUSEDOWN:	other = term_send_mouse(vterm, 4, 1); break;
	case K_MOUSELEFT:	/* TODO */ return 0;
	case K_MOUSERIGHT:	/* TODO */ return 0;

	case K_LEFTMOUSE:
	case K_LEFTMOUSE_NM:
	case K_LEFTDRAG:
	case K_LEFTRELEASE:
	case K_LEFTRELEASE_NM:
	case K_MOUSEMOVE:
	case K_MIDDLEMOUSE:
	case K_MIDDLEDRAG:
	case K_MIDDLERELEASE:
	case K_RIGHTMOUSE:
	case K_RIGHTDRAG:
	case K_RIGHTRELEASE:	if (!term_mouse_click(vterm, c))
				    return 0;
				other = TRUE;
				break;

	case K_X1MOUSE:		/* TODO */ return 0;
	case K_X1DRAG:		/* TODO */ return 0;
	case K_X1RELEASE:	/* TODO */ return 0;
	case K_X2MOUSE:		/* TODO */ return 0;
	case K_X2DRAG:		/* TODO */ return 0;
	case K_X2RELEASE:	/* TODO */ return 0;

	case K_IGNORE:		return 0;
	case K_NOP:		return 0;
	case K_UNDO:		return 0;
	case K_HELP:		return 0;
	case K_XF1:		key = VTERM_KEY_FUNCTION(1); break;
	case K_XF2:		key = VTERM_KEY_FUNCTION(2); break;
	case K_XF3:		key = VTERM_KEY_FUNCTION(3); break;
	case K_XF4:		key = VTERM_KEY_FUNCTION(4); break;
	case K_SELECT:		return 0;
#ifdef FEAT_GUI
	case K_VER_SCROLLBAR:	return 0;
	case K_HOR_SCROLLBAR:	return 0;
#endif
#ifdef FEAT_GUI_TABLINE
	case K_TABLINE:		return 0;
	case K_TABMENU:		return 0;
#endif
#ifdef FEAT_NETBEANS_INTG
	case K_F21:		key = VTERM_KEY_FUNCTION(21); break;
#endif
#ifdef FEAT_DND
	case K_DROP:		return 0;
#endif
	case K_CURSORHOLD:	return 0;

	/* Bracketed paste markers. */
	case K_PS:		vterm_keyboard_start_paste(vterm);
				other = TRUE;
				break;
	case K_PE:		vterm_keyboard_end_paste(vterm);
				other = TRUE;
				break;
    }

    /*
     * Convert special keys to vterm keys:
     * - Write keys to vterm: vterm_keyboard_key()
     * - Write output to channel.
     * TODO: use mod_mask
     */
    if (key != VTERM_KEY_NONE)
	/* Special key, let vterm convert it. */
	vterm_keyboard_key(vterm, key, mod);
    else if (!other)
	/* Normal character, let vterm convert it. */
	vterm_keyboard_unichar(vterm, c, mod);

    /* Read back the converted escape sequence. */
    return (int)vterm_output_read(vterm, buf, KEY_BUF_LEN);
}
/*
 * Return TRUE if the job for "term" is still running.
 * If "check_job_status" is TRUE update the job status first.
 * A closed channel also counts as "not running", to avoid a race condition
 * when updating the title.
 */
    static int
term_job_running_check(term_T *term, int check_job_status)
{
    if (term == NULL || term->tl_job == NULL
				  || !channel_is_open(term->tl_job->jv_channel))
	return FALSE;

    if (check_job_status)
	job_status(term->tl_job);
    return term->tl_job->jv_status == JOB_STARTED
				      || term->tl_job->jv_channel->ch_keep_open;
}
/*
 * Return TRUE if the job for "term" is still running.
 * The job status is not refreshed; use term_job_running_check() for that.
 */
    int
term_job_running(term_T *term)
{
    return term_job_running_check(term, /* check_job_status = */ FALSE);
}
/*
 * Return TRUE if "term" has an active channel and was started with
 * ":term NONE" (no real job, the channel is kept open).
 * A closed channel counts as finished, to avoid a race condition when
 * updating the title.
 */
    int
term_none_open(term_T *term)
{
    if (term == NULL || term->tl_job == NULL)
	return FALSE;
    return channel_is_open(term->tl_job->jv_channel)
				       && term->tl_job->jv_channel->ch_keep_open;
}
/*
 * Used when exiting: kill the job in "buf" if so desired.
 * Return OK when the job finished.
 * Return FAIL when the job is still running.
 */
    int
term_try_stop_job(buf_T *buf)
{
    int		count;
    char	*how = (char *)buf->b_term->tl_kill;

#if defined(FEAT_GUI_DIALOG) || defined(FEAT_CON_DIALOG)
    /* Ask the user when 'confirm' is set and no kill method was specified. */
    if ((how == NULL || *how == NUL) && (p_confirm || cmdmod.confirm))
    {
	char_u	buff[DIALOG_MSG_SIZE];
	int	ret;

	dialog_msg(buff, _("Kill job in \"%s\"?"), buf->b_fname);
	ret = vim_dialog_yesnocancel(VIM_QUESTION, NULL, buff, 1);
	if (ret == VIM_YES)
	    how = "kill";
	else if (ret == VIM_CANCEL)
	    return FAIL;
    }
#endif
    if (how == NULL || *how == NUL)
	return FAIL;

    job_stop(buf->b_term->tl_job, NULL, how);

    /* wait for up to a second for the job to die */
    for (count = 0; count < 100; ++count)
    {
	/* buffer, terminal and job may be cleaned up while waiting */
	if (!buf_valid(buf)
		|| buf->b_term == NULL
		|| buf->b_term->tl_job == NULL)
	    return OK;

	/* call job_status() to update jv_status */
	job_status(buf->b_term->tl_job);
	if (buf->b_term->tl_job->jv_status >= JOB_ENDED)
	    return OK;
	ui_delay(10L, FALSE);
	mch_check_messages();
	parse_queued_messages();
    }
    return FAIL;
}
/*
 * Add the last line of the scrollback buffer to the buffer in the window.
 * "text" is a NUL terminated string of "len" bytes.
 */
    static void
add_scrollback_line_to_buffer(term_T *term, char_u *text, int len)
{
    buf_T	*buf = term->tl_buffer;
    int		empty = (buf->b_ml.ml_flags & ML_EMPTY);
    linenr_T	lnum = buf->b_ml.ml_line_count;

#ifdef WIN3264
    /* Convert from UTF-8 to the active codepage before appending. */
    if (!enc_utf8 && enc_codepage > 0)
    {
	WCHAR	*ret = NULL;
	int	length = 0;

	MultiByteToWideChar_alloc(CP_UTF8, 0, (char*)text, len + 1,
							       &ret, &length);
	if (ret != NULL)
	{
	    WideCharToMultiByte_alloc(enc_codepage, 0,
				      ret, length, (char **)&text, &len, 0, 0);
	    vim_free(ret);
	    ml_append_buf(term->tl_buffer, lnum, text, len, FALSE);
	    vim_free(text);
	}
    }
    else
#endif
	ml_append_buf(term->tl_buffer, lnum, text, len + 1, FALSE);
    if (empty)
    {
	/* Delete the empty line that was in the empty buffer. */
	curbuf = buf;
	ml_delete(1, FALSE);
	curbuf = curwin->w_buffer;
    }
}
/*
 * Copy the attributes of screen cell "cell" into "attr".
 */
    static void
cell2cellattr(const VTermScreenCell *cell, cellattr_T *attr)
{
    attr->bg = cell->bg;
    attr->fg = cell->fg;
    attr->attrs = cell->attrs;
    attr->width = cell->width;
}
/*
 * Return TRUE when "a" and "b" have the same foreground and background
 * colors.  Comparing the colors should be sufficient.
 */
    static int
equal_celattr(cellattr_T *a, cellattr_T *b)
{
    if (a->fg.red != b->fg.red
	    || a->fg.green != b->fg.green
	    || a->fg.blue != b->fg.blue)
	return FALSE;
    return a->bg.red == b->bg.red
	    && a->bg.green == b->bg.green
	    && a->bg.blue == b->bg.blue;
}
/*
 * Add an empty scrollback line to "term".  When "lnum" is not zero, add the
 * line at this position, shifting later lines down.  Otherwise add it at the
 * end.
 * Returns OK or FAIL.
 */
    static int
add_empty_scrollback(term_T *term, cellattr_T *fill_attr, int lnum)
{
    if (ga_grow(&term->tl_scrollback, 1) == OK)
    {
	sb_line_T *line = (sb_line_T *)term->tl_scrollback.ga_data
						  + term->tl_scrollback.ga_len;

	if (lnum > 0)
	{
	    int	i;

	    /* Shift the entries after "lnum" one position down to make room
	     * for the new line. */
	    for (i = 0; i < term->tl_scrollback.ga_len - lnum; ++i)
	    {
		*line = *(line - 1);
		--line;
	    }
	}
	line->sb_cols = 0;
	line->sb_cells = NULL;
	line->sb_fill_attr = *fill_attr;
	++term->tl_scrollback.ga_len;
	return OK;
    }
    /* Return FAIL (not FALSE) to pair with the OK success value; callers
     * compare against OK.  (Same numeric value, consistent symbol.) */
    return FAIL;
}
/*
 * Remove the terminal contents from the scrollback and the buffer.
 * Used before adding a new scrollback line or updating the buffer for lines
 * displayed in the terminal.
 */
    static void
cleanup_scrollback(term_T *term)
{
    sb_line_T	*line;
    garray_T	*gap;

    curbuf = term->tl_buffer;
    gap = &term->tl_scrollback;
    /* Delete the buffer lines (and their scrollback entries) that represent
     * the currently visible part of the terminal; only the scrolled-off part
     * is kept. */
    while (curbuf->b_ml.ml_line_count > term->tl_scrollback_scrolled
							    && gap->ga_len > 0)
    {
	ml_delete(curbuf->b_ml.ml_line_count, FALSE);
	line = (sb_line_T *)gap->ga_data + gap->ga_len - 1;
	vim_free(line->sb_cells);
	--gap->ga_len;
    }
    curbuf = curwin->w_buffer;
    if (curbuf == term->tl_buffer)
	check_cursor();
}
/*
 * Add the current lines of the terminal to scrollback and to the buffer.
 * Trailing lines that only consist of the filler attribute are collapsed
 * into empty lines.
 */
    static void
update_snapshot(term_T *term)
{
    VTermScreen	    *screen;
    int		    len;
    int		    lines_skipped = 0;
    VTermPos	    pos;
    VTermScreenCell cell;
    cellattr_T	    fill_attr, new_fill_attr;
    cellattr_T	    *p;

    ch_log(term->tl_job == NULL ? NULL : term->tl_job->jv_channel,
				  "Adding terminal window snapshot to buffer");

    /* First remove the lines that were appended before, they might be
     * outdated. */
    cleanup_scrollback(term);

    screen = vterm_obtain_screen(term->tl_vterm);
    fill_attr = new_fill_attr = term->tl_default_color;
    for (pos.row = 0; pos.row < term->tl_rows; ++pos.row)
    {
	len = 0;
	/* Find the rightmost non-empty cell; "len" becomes the used width. */
	for (pos.col = 0; pos.col < term->tl_cols; ++pos.col)
	    if (vterm_screen_get_cell(screen, pos, &cell) != 0
						       && cell.chars[0] != NUL)
	    {
		len = pos.col + 1;
		new_fill_attr = term->tl_default_color;
	    }
	    else
		/* Assume the last attr is the filler attr. */
		cell2cellattr(&cell, &new_fill_attr);

	if (len == 0 && equal_celattr(&new_fill_attr, &fill_attr))
	    ++lines_skipped;
	else
	{
	    while (lines_skipped > 0)
	    {
		/* Line was skipped, add an empty line. */
		--lines_skipped;
		if (add_empty_scrollback(term, &fill_attr, 0) == OK)
		    add_scrollback_line_to_buffer(term, (char_u *)"", 0);
	    }

	    if (len == 0)
		p = NULL;
	    else
		p = (cellattr_T *)alloc((int)sizeof(cellattr_T) * len);
	    if ((p != NULL || len == 0)
				     && ga_grow(&term->tl_scrollback, 1) == OK)
	    {
		garray_T    ga;
		int	    width;
		sb_line_T   *line = (sb_line_T *)term->tl_scrollback.ga_data
						  + term->tl_scrollback.ga_len;

		/* Collect the text of the line in "ga", the cell attributes
		 * in "p". */
		ga_init2(&ga, 1, 100);
		for (pos.col = 0; pos.col < len; pos.col += width)
		{
		    if (vterm_screen_get_cell(screen, pos, &cell) == 0)
		    {
			/* Cannot get the cell: store a space with cleared
			 * attributes. */
			width = 1;
			vim_memset(p + pos.col, 0, sizeof(cellattr_T));
			if (ga_grow(&ga, 1) == OK)
			    ga.ga_len += utf_char2bytes(' ',
					     (char_u *)ga.ga_data + ga.ga_len);
		    }
		    else
		    {
			width = cell.width;

			cell2cellattr(&cell, &p[pos.col]);

			// Each character can be up to 6 bytes.
			if (ga_grow(&ga, VTERM_MAX_CHARS_PER_CELL * 6) == OK)
			{
			    int	    i;
			    int	    c;

			    for (i = 0; (c = cell.chars[i]) > 0 || i == 0; ++i)
				ga.ga_len += utf_char2bytes(c == NUL ? ' ' : c,
					     (char_u *)ga.ga_data + ga.ga_len);
			}
		    }
		}
		line->sb_cols = len;
		line->sb_cells = p;
		line->sb_fill_attr = new_fill_attr;
		fill_attr = new_fill_attr;
		++term->tl_scrollback.ga_len;

		if (ga_grow(&ga, 1) == FAIL)
		    add_scrollback_line_to_buffer(term, (char_u *)"", 0);
		else
		{
		    *((char_u *)ga.ga_data + ga.ga_len) = NUL;
		    add_scrollback_line_to_buffer(term, ga.ga_data, ga.ga_len);
		}
		ga_clear(&ga);
	    }
	    else
		vim_free(p);
	}
    }

    // Add trailing empty lines.
    for (pos.row = term->tl_scrollback.ga_len;
	    pos.row < term->tl_scrollback_scrolled + term->tl_cursor_pos.row;
	    ++pos.row)
    {
	if (add_empty_scrollback(term, &fill_attr, 0) == OK)
	    add_scrollback_line_to_buffer(term, (char_u *)"", 0);
    }

    term->tl_dirty_snapshot = FALSE;
#ifdef FEAT_TIMERS
    term->tl_timer_set = FALSE;
#endif
}
/*
 * If needed, add the current lines of the terminal to scrollback and to the
 * buffer.  Called after the job has ended and when switching to
 * Terminal-Normal mode.
 * When "redraw" is TRUE redraw the windows that show the terminal.
 */
    static void
may_move_terminal_to_buffer(term_T *term, int redraw)
{
    win_T	    *wp;

    if (term->tl_vterm == NULL)
	return;

    /* Update the snapshot only if something changes or the buffer does not
     * have all the lines. */
    if (term->tl_dirty_snapshot || term->tl_buffer->b_ml.ml_line_count
					       <= term->tl_scrollback_scrolled)
	update_snapshot(term);

    /* Obtain the current background color. */
    vterm_state_get_default_colors(vterm_obtain_state(term->tl_vterm),
		       &term->tl_default_color.fg, &term->tl_default_color.bg);

    if (redraw)
	FOR_ALL_WINDOWS(wp)
	{
	    if (wp->w_buffer == term->tl_buffer)
	    {
		/* Put the cursor on the last line and scroll the window so
		 * that it is visible. */
		wp->w_cursor.lnum = term->tl_buffer->b_ml.ml_line_count;
		wp->w_cursor.col = 0;
		wp->w_valid = 0;
		if (wp->w_cursor.lnum >= wp->w_height)
		{
		    linenr_T min_topline = wp->w_cursor.lnum - wp->w_height + 1;

		    if (wp->w_topline < min_topline)
			wp->w_topline = min_topline;
		}
		redraw_win_later(wp, NOT_VALID);
	    }
	}
}
#if defined(FEAT_TIMERS) || defined(PROTO)
/*
 * Check if any terminal timer expired.  If so, copy text from the terminal to
 * the buffer.
 * Return the time until the next timer will expire.
 */
    int
term_check_timers(int next_due_arg, proftime_T *now)
{
    int		next_due = next_due_arg;
    term_T	*term;

    for (term = first_term; term != NULL; term = term->tl_next)
    {
	long	this_due;

	/* Only terminals with an armed timer, and not in Terminal-Normal
	 * mode, are considered. */
	if (!term->tl_timer_set || term->tl_normal_mode)
	    continue;

	this_due = proftime_time_left(&term->tl_timer_due, now);
	if (this_due <= 1)
	{
	    /* Timer went off: copy the terminal contents to the buffer. */
	    term->tl_timer_set = FALSE;
	    may_move_terminal_to_buffer(term, FALSE);
	}
	else if (next_due == -1 || next_due > this_due)
	    next_due = this_due;
    }
    return next_due;
}
#endif
/*
 * Set Terminal-Normal mode on/off for "term" and refresh the title/status.
 */
    static void
set_terminal_mode(term_T *term, int normal_mode)
{
    term->tl_normal_mode = normal_mode;
    /* The status text shows the mode; force it to be rebuilt. */
    VIM_CLEAR(term->tl_status_text);
    if (curbuf == term->tl_buffer)
	maketitle();
}
/*
 * Called after the job has finished and Terminal mode is not active:
 * Move the vterm contents into the scrollback buffer and free the vterm.
 */
    static void
cleanup_vterm(term_T *term)
{
    int keep_contents = (term->tl_finish != TL_FINISH_CLOSE);

    if (keep_contents)
	may_move_terminal_to_buffer(term, TRUE);
    term_free_vterm(term);
    set_terminal_mode(term, FALSE);
}
/*
 * Switch from Terminal-Job mode to Terminal-Normal mode.
 * Suspends updating the terminal window.
 */
    static void
term_enter_normal_mode(void)
{
    term_T *term = curbuf->b_term;

    set_terminal_mode(term, TRUE);

    /* Append the current terminal contents to the buffer. */
    may_move_terminal_to_buffer(term, TRUE);

    /* Move the window cursor to the position of the cursor in the
     * terminal. */
    curwin->w_cursor.lnum = term->tl_scrollback_scrolled
					      + term->tl_cursor_pos.row + 1;
    check_cursor();
    if (coladvance(term->tl_cursor_pos.col) == FAIL)
	coladvance(MAXCOL);

    /* Display the same lines as in the terminal. */
    curwin->w_topline = term->tl_scrollback_scrolled + 1;
}
/*
* Returns TRUE if the current window contains a terminal and we are in
* Terminal-Normal mode.
*/
int
term_in_normal_mode(void)
{
term_T *term = curbuf->b_term;
return term != NULL && term->tl_normal_mode;
}
/*
 * Switch from Terminal-Normal mode to Terminal-Job mode.
 * Restores updating the terminal window.
 */
    void
term_enter_job_mode(void)	/* (void): empty parens declare no prototype */
{
    term_T	*term = curbuf->b_term;

    set_terminal_mode(term, FALSE);

    /* If the channel was closed while in Terminal-Normal mode the vterm can
     * be cleaned up now. */
    if (term->tl_channel_closed)
	cleanup_vterm(term);
    redraw_buf_and_status_later(curbuf, NOT_VALID);
}
/*
 * Get a key from the user with terminal mode mappings.
 * Note: while waiting a terminal may be closed and freed if the channel is
 * closed and ++close was used.
 */
    static int
term_vgetc()
{
    int c;
    int save_State = State;

    /* Use TERMINAL state so that terminal-mode mappings apply while getting
     * the character; restore the previous State afterwards. */
    State = TERMINAL;
    got_int = FALSE;
#ifdef WIN3264
    ctrl_break_was_pressed = FALSE;
#endif
    c = vgetc();
    got_int = FALSE;
    State = save_State;
    return c;
}
/* TRUE when the last mouse event was outside the terminal window; drag events
 * that started outside are then also handled in Normal mode. */
static int mouse_was_outside = FALSE;
/*
 * Send keys to terminal.
 * Return FAIL when the key needs to be handled in Normal mode.
 * Return OK when the key was dropped or sent to the terminal.
 */
    int
send_keys_to_term(term_T *term, int c, int typed)
{
    char	msg[KEY_BUF_LEN];
    size_t	len;
    int		dragging_outside = FALSE;

    /* Catch keys that need to be handled as in Normal mode. */
    switch (c)
    {
	case NUL:
	case K_ZERO:
	    if (typed)
		stuffcharReadbuff(c);
	    return FAIL;

	case K_TABLINE:
	    stuffcharReadbuff(c);
	    return FAIL;

	case K_IGNORE:
	case K_CANCEL:  // used for :normal when running out of chars
	    return FAIL;

	case K_LEFTDRAG:
	case K_MIDDLEDRAG:
	case K_RIGHTDRAG:
	case K_X1DRAG:
	case K_X2DRAG:
	    dragging_outside = mouse_was_outside;
	    /* FALLTHROUGH */
	case K_LEFTMOUSE:
	case K_LEFTMOUSE_NM:
	case K_LEFTRELEASE:
	case K_LEFTRELEASE_NM:
	case K_MOUSEMOVE:
	case K_MIDDLEMOUSE:
	case K_MIDDLERELEASE:
	case K_RIGHTMOUSE:
	case K_RIGHTRELEASE:
	case K_X1MOUSE:
	case K_X1RELEASE:
	case K_X2MOUSE:
	case K_X2RELEASE:
	case K_MOUSEUP:
	case K_MOUSEDOWN:
	case K_MOUSELEFT:
	case K_MOUSERIGHT:
	    if (mouse_row < W_WINROW(curwin)
		    || mouse_row >= (W_WINROW(curwin) + curwin->w_height)
		    || mouse_col < curwin->w_wincol
		    || mouse_col >= W_ENDCOL(curwin)
		    || dragging_outside)
	    {
		/* click or scroll outside the current window or on status line
		 * or vertical separator */
		if (typed)
		{
		    stuffcharReadbuff(c);
		    mouse_was_outside = TRUE;
		}
		return FAIL;
	    }
    }
    if (typed)
	mouse_was_outside = FALSE;

    /* Convert the typed key to a sequence of bytes for the job. */
    len = term_convert_key(term, c, msg);
    if (len > 0)
	/* TODO: if FAIL is returned, stop? */
	channel_send(term->tl_job->jv_channel, get_tty_part(term),
						(char_u *)msg, (int)len, NULL);

    return OK;
}
/*
 * Set the window cursor position to the terminal cursor position, clamped to
 * the window size.
 */
    static void
position_cursor(win_T *wp, VTermPos *pos)
{
    int maxrow = MAX(0, wp->w_height - 1);
    int maxcol = MAX(0, wp->w_width - 1);

    wp->w_wrow = MIN(pos->row, maxrow);
    wp->w_wcol = MIN(pos->col, maxcol);
    wp->w_valid |= (VALID_WCOL|VALID_WROW);
}
/*
 * Handle CTRL-W "": send register contents to the job.
 * "prev_c" is the key typed before '"' (only used for 'showcmd').
 */
    static void
term_paste_register(int prev_c UNUSED)
{
    int		c;
    list_T	*l;
    listitem_T	*item;
    long	reglen = 0;
    int		type;

#ifdef FEAT_CMDL_INFO
    if (add_to_showcmd(prev_c))
	if (add_to_showcmd('"'))
	    out_flush();
#endif
    c = term_vgetc();
#ifdef FEAT_CMDL_INFO
    clear_showcmd();
#endif
    if (!term_use_loop())
	/* job finished while waiting for a character */
	return;

    /* CTRL-W "= prompt for expression to evaluate. */
    if (c == '=' && get_expr_register() != '=')
	return;
    if (!term_use_loop())
	/* job finished while waiting for a character */
	return;

    l = (list_T *)get_reg_contents(c, GREG_LIST);
    if (l != NULL)
    {
	/* Fix: "&reglen" was mojibake-corrupted to "(R)len" in this line. */
	type = get_reg_type(c, &reglen);
	for (item = l->lv_first; item != NULL; item = item->li_next)
	{
	    char_u *s = tv_get_string(&item->li_tv);
#ifdef WIN3264
	    char_u *tmp = s;

	    /* Convert the text from the active codepage to UTF-8 before
	     * sending it to the job. */
	    if (!enc_utf8 && enc_codepage > 0)
	    {
		WCHAR	*ret = NULL;
		int	length = 0;

		MultiByteToWideChar_alloc(enc_codepage, 0, (char *)s,
						(int)STRLEN(s), &ret, &length);
		if (ret != NULL)
		{
		    WideCharToMultiByte_alloc(CP_UTF8, 0,
				     ret, length, (char **)&s, &length, 0, 0);
		    vim_free(ret);
		}
	    }
#endif
	    channel_send(curbuf->b_term->tl_job->jv_channel, PART_IN,
						      s, (int)STRLEN(s), NULL);
#ifdef WIN3264
	    if (tmp != s)
		vim_free(s);
#endif

	    /* Send a CR after every line but possibly the last one: always
	     * for a linewise register. */
	    if (item->li_next != NULL || type == MLINE)
		channel_send(curbuf->b_term->tl_job->jv_channel, PART_IN,
						      (char_u *)"\r", 1, NULL);
	}
	list_free(l);
    }
}
/*
 * Return TRUE when waiting for a character in the terminal, the cursor of the
 * terminal should be displayed.
 */
    int
terminal_is_active(void)	/* (void): empty parens declare no prototype */
{
    return in_terminal_loop != NULL;
}
#if defined(FEAT_GUI) || defined(PROTO)
/*
 * Return the cursor shape to use in the GUI while a terminal has focus.
 * "fg" and "bg" are set to the colors to use.
 * NOTE(review): assumes in_terminal_loop is not NULL; presumably only called
 * while terminal_is_active() -- verify callers.
 */
    cursorentry_T *
term_get_cursor_shape(guicolor_T *fg, guicolor_T *bg)
{
    term_T		 *term = in_terminal_loop;
    static cursorentry_T entry;
    int			 id;
    guicolor_T		 term_fg, term_bg;

    vim_memset(&entry, 0, sizeof(entry));
    entry.shape = entry.mshape =
	term->tl_cursor_shape == VTERM_PROP_CURSORSHAPE_UNDERLINE ? SHAPE_HOR :
	term->tl_cursor_shape == VTERM_PROP_CURSORSHAPE_BAR_LEFT ? SHAPE_VER :
	SHAPE_BLOCK;
    entry.percentage = 20;
    if (term->tl_cursor_blink)
    {
	entry.blinkwait = 700;
	entry.blinkon = 400;
	entry.blinkoff = 250;
    }

    /* The "Terminal" highlight group overrules the defaults. */
    id = syn_name2id((char_u *)"Terminal");
    if (id != 0)
    {
	syn_id2colors(id, &term_fg, &term_bg);
	*fg = term_bg;
    }
    else
	*fg = gui.back_pixel;

    if (term->tl_cursor_color == NULL)
    {
	if (id != 0)
	    *bg = term_fg;
	else
	    *bg = gui.norm_pixel;
    }
    else
	*bg = color_name2handle(term->tl_cursor_color);
    entry.name = "n";
    entry.used_for = SHAPE_CURSOR;

    return &entry;
}
#endif
/*
 * Output the desired cursor color, shape and blink to the terminal, but only
 * when any of them differ from what was last set.
 */
    static void
may_output_cursor_props(void)
{
    if (!cursor_color_equal(last_set_cursor_color, desired_cursor_color)
	    || last_set_cursor_shape != desired_cursor_shape
	    || last_set_cursor_blink != desired_cursor_blink)
    {
	cursor_color_copy(&last_set_cursor_color, desired_cursor_color);
	last_set_cursor_shape = desired_cursor_shape;
	last_set_cursor_blink = desired_cursor_blink;
	term_cursor_color(cursor_color_get(desired_cursor_color));
	if (desired_cursor_shape == -1 || desired_cursor_blink == -1)
	    /* this will restore the initial cursor style, if possible */
	    ui_cursor_shape_forced(TRUE);
	else
	    term_cursor_shape(desired_cursor_shape, desired_cursor_blink);
    }
}
/*
 * Set the cursor color and shape, if not last set to these.
 */
    static void
may_set_cursor_props(term_T *term)
{
#ifdef FEAT_GUI
    /* For the GUI the cursor properties are obtained with
     * term_get_cursor_shape(). */
    if (gui.in_use)
	return;
#endif
    if (in_terminal_loop != term)
	return;

    cursor_color_copy(&desired_cursor_color, term->tl_cursor_color);
    desired_cursor_shape = term->tl_cursor_shape;
    desired_cursor_blink = term->tl_cursor_blink;
    may_output_cursor_props();
}
/*
 * Reset the desired cursor properties and restore them when needed.
 */
    static void
prepare_restore_cursor_props(void)
{
#ifdef FEAT_GUI
    if (gui.in_use)
	return;
#endif
    /* Clear the desired properties; may_output_cursor_props() then restores
     * the default cursor style. */
    cursor_color_copy(&desired_cursor_color, NULL);
    desired_cursor_shape = -1;
    desired_cursor_blink = -1;
    may_output_cursor_props();
}
/*
* Returns TRUE if the current window contains a terminal and we are sending
* keys to the job.
* If "check_job_status" is TRUE update the job status.
*/
static int
term_use_loop_check(int check_job_status)
{
term_T *term = curbuf->b_term;
return term != NULL
&& !term->tl_normal_mode
&& term->tl_vterm != NULL
&& term_job_running_check(term, check_job_status);
}
/*
 * Returns TRUE if the current window contains a terminal and we are sending
 * keys to the job.  Does not update the job status.
 */
    int
term_use_loop(void)
{
    return term_use_loop_check(/* check_job_status = */ FALSE);
}
/*
 * Called when entering a window with the mouse.  If this is a terminal window
 * we may want to change state.
 */
    void
term_win_entered()
{
    term_T *term = curbuf->b_term;

    if (term == NULL)
	return;

    if (term_use_loop_check(TRUE))
    {
	/* Going back to sending keys to the job: drop any Visual mode and
	 * pending insert. */
	reset_VIsual_and_resel();
	if (State & INSERT)
	    stop_insert_mode = TRUE;
    }
    mouse_was_outside = FALSE;
    enter_mouse_col = mouse_col;
    enter_mouse_row = mouse_row;
}
/*
 * Wait for input and send it to the job.
 * When "blocking" is TRUE wait for a character to be typed.  Otherwise return
 * when there is no more typahead.
 * Return when the start of a CTRL-W command is typed or anything else that
 * should be handled as a Normal mode command.
 * Returns OK if a typed character is to be handled in Normal mode, FAIL if
 * the terminal was closed.
 */
    int
terminal_loop(int blocking)
{
    int		c;
    int		termwinkey = 0;
    int		ret;
#ifdef UNIX
    int		tty_fd = curbuf->b_term->tl_job->jv_channel
				 ->ch_part[get_tty_part(curbuf->b_term)].ch_fd;
#endif
    int		restore_cursor = FALSE;

    /* Remember the terminal we are sending keys to.  However, the terminal
     * might be closed while waiting for a character, e.g. typing "exit" in a
     * shell and ++close was used.  Therefore use curbuf->b_term instead of a
     * stored reference. */
    in_terminal_loop = curbuf->b_term;

    /* 'termwinkey' can replace CTRL-W as the prefix key. */
    if (*curwin->w_p_twk != NUL)
    {
	termwinkey = string_to_key(curwin->w_p_twk, TRUE);
	if (termwinkey == Ctrl_W)
	    termwinkey = 0;
    }
    position_cursor(curwin, &curbuf->b_term->tl_cursor_pos);
    may_set_cursor_props(curbuf->b_term);

    while (blocking || vpeekc_nomap() != NUL)
    {
#ifdef FEAT_GUI
	if (!curbuf->b_term->tl_system)
#endif
	    /* TODO: skip screen update when handling a sequence of keys. */
	    /* Repeat redrawing in case a message is received while redrawing.
	     */
	    while (must_redraw != 0)
		if (update_screen(0) == FAIL)
		    break;
	if (!term_use_loop_check(TRUE) || in_terminal_loop != curbuf->b_term)
	    /* job finished while redrawing */
	    break;

	update_cursor(curbuf->b_term, FALSE);
	restore_cursor = TRUE;

	c = term_vgetc();
	if (!term_use_loop_check(TRUE) || in_terminal_loop != curbuf->b_term)
	{
	    /* Job finished while waiting for a character.  Push back the
	     * received character. */
	    if (c != K_IGNORE)
		vungetc(c);
	    break;
	}
	if (c == K_IGNORE)
	    continue;

#ifdef UNIX
	/*
	 * The shell or another program may change the tty settings.  Getting
	 * them for every typed character is a bit of overhead, but it's needed
	 * for the first character typed, e.g. when Vim starts in a shell.
	 */
	if (isatty(tty_fd))
	{
	    ttyinfo_T info;

	    /* Get the current backspace character of the pty. */
	    if (get_tty_info(tty_fd, &info) == OK)
		term_backspace_char = info.backspace;
	}
#endif
#ifdef WIN3264
	/* On Windows winpty handles CTRL-C, don't send a CTRL_C_EVENT.
	 * Use CTRL-BREAK to kill the job. */
	if (ctrl_break_was_pressed)
	    mch_signal_job(curbuf->b_term->tl_job, (char_u *)"kill");
#endif
	/* Was either CTRL-W (termwinkey) or CTRL-\ pressed?
	 * Not in a system terminal. */
	if ((c == (termwinkey == 0 ? Ctrl_W : termwinkey) || c == Ctrl_BSL)
#ifdef FEAT_GUI
		&& !curbuf->b_term->tl_system
#endif
		)
	{
	    int prev_c = c;

#ifdef FEAT_CMDL_INFO
	    if (add_to_showcmd(c))
		out_flush();
#endif
	    c = term_vgetc();
#ifdef FEAT_CMDL_INFO
	    clear_showcmd();
#endif
	    if (!term_use_loop_check(TRUE)
					 || in_terminal_loop != curbuf->b_term)
		/* job finished while waiting for a character */
		break;

	    if (prev_c == Ctrl_BSL)
	    {
		if (c == Ctrl_N)
		{
		    /* CTRL-\ CTRL-N : go to Terminal-Normal mode. */
		    term_enter_normal_mode();
		    ret = FAIL;
		    goto theend;
		}
		/* Send both keys to the terminal. */
		send_keys_to_term(curbuf->b_term, prev_c, TRUE);
	    }
	    else if (c == Ctrl_C)
	    {
		/* "CTRL-W CTRL-C" or 'termwinkey' CTRL-C: end the job */
		mch_signal_job(curbuf->b_term->tl_job, (char_u *)"kill");
	    }
	    else if (c == '.')
	    {
		/* "CTRL-W .": send CTRL-W to the job */
		/* "'termwinkey' .": send 'termwinkey' to the job */
		c = termwinkey == 0 ? Ctrl_W : termwinkey;
	    }
	    else if (c == Ctrl_BSL)
	    {
		/* "CTRL-W CTRL-\": send CTRL-\ to the job */
		c = Ctrl_BSL;
	    }
	    else if (c == 'N')
	    {
		/* CTRL-W N : go to Terminal-Normal mode. */
		term_enter_normal_mode();
		ret = FAIL;
		goto theend;
	    }
	    else if (c == '"')
	    {
		term_paste_register(prev_c);
		continue;
	    }
	    else if (termwinkey == 0 || c != termwinkey)
	    {
		/* Any other key after the prefix: handle as a Normal mode
		 * CTRL-W command. */
		stuffcharReadbuff(Ctrl_W);
		stuffcharReadbuff(c);
		ret = OK;
		goto theend;
	    }
	}
# ifdef WIN3264
	/* Convert a codepage character to a unicode character. */
	if (!enc_utf8 && has_mbyte && c >= 0x80)
	{
	    WCHAR   wc;
	    char_u  mb[3];

	    mb[0] = (unsigned)c >> 8;
	    mb[1] = c;
	    if (MultiByteToWideChar(GetACP(), 0, (char*)mb, 2, &wc, 1) > 0)
		c = wc;
	}
# endif
	if (send_keys_to_term(curbuf->b_term, c, TRUE) != OK)
	{
	    if (c == K_MOUSEMOVE)
		/* We are sure to come back here, don't reset the cursor color
		 * and shape to avoid flickering. */
		restore_cursor = FALSE;

	    ret = OK;
	    goto theend;
	}
    }
    ret = FAIL;

theend:
    in_terminal_loop = NULL;
    if (restore_cursor)
	prepare_restore_cursor_props();

    /* Move a snapshot of the screen contents to the buffer, so that completion
     * works in other buffers. */
    if (curbuf->b_term != NULL && !curbuf->b_term->tl_normal_mode)
	may_move_terminal_to_buffer(curbuf->b_term, FALSE);

    return ret;
}
/*
 * Called when a job has finished.
 * This updates the title and status, but does not close the vterm, because
 * there might still be pending output in the channel.
 */
    void
term_job_ended(job_T *job)
{
    term_T	*term;
    int		did_one = FALSE;

    /* Invalidate the title and status text of every terminal using "job". */
    for (term = first_term; term != NULL; term = term->tl_next)
	if (term->tl_job == job)
	{
	    VIM_CLEAR(term->tl_title);
	    VIM_CLEAR(term->tl_status_text);
	    redraw_buf_and_status_later(term->tl_buffer, VALID);
	    did_one = TRUE;
	}
    if (did_one)
	redraw_statuslines();
    if (curbuf->b_term != NULL)
    {
	if (curbuf->b_term->tl_job == job)
	    maketitle();
	update_cursor(curbuf->b_term, TRUE);
    }
}
/*
 * Switch the physical cursor on or off according to "term"'s visibility
 * state, but only when this terminal is currently handling input in
 * terminal_loop().
 */
    static void
may_toggle_cursor(term_T *term)
{
    if (in_terminal_loop != term)
	return;
    if (term->tl_cursor_visible)
	cursor_on();
    else
	cursor_off();
}
/*
 * Reverse engineer the RGB value into a cterm color index.
 * First color is 1.  Return 0 if no match found (default color).
 */
    static int
color2index(VTermColor *color, int fg, int *boldp)
{
    int red = color->red;
    int blue = color->blue;
    int green = color->green;

    if (color->ansi_index != VTERM_ANSI_INDEX_NONE)
    {
	/* First 16 colors and default: use the ANSI index, because these
	 * colors can be redefined. */
	if (t_colors >= 16)
	    return color->ansi_index;

	if (color->ansi_index == 0)
	    return 0;
	if (color->ansi_index <= 16)
	{
	    /* lookup_color() argument for ANSI indexes 1 - 16: black,
	     * dark red, dark green, brown, dark blue, dark magenta,
	     * dark cyan, light grey, dark grey, red, green, yellow, blue,
	     * magenta, cyan, white. */
	    static char lookup_nr[16] = {
		 0, 4, 2, 6, 1, 5, 3, 8, 12, 20, 16, 24, 14, 22, 18, 26};

	    return lookup_color(lookup_nr[color->ansi_index - 1],
							       fg, boldp) + 1;
	}
	/* any other index: fall through to the RGB matching below */
    }

    if (t_colors >= 256)
    {
	if (red == blue && red == green)
	{
	    /* 24-color greyscale plus white and black */
	    static int cutoff[23] = {
		    0x0D, 0x17, 0x21, 0x2B, 0x35, 0x3F, 0x49, 0x53, 0x5D,
		    0x67, 0x71, 0x7B, 0x85, 0x8F, 0x99, 0xA3, 0xAD, 0xB7,
		    0xC1, 0xCB, 0xD5, 0xDF, 0xE9};
	    int idx;

	    if (red < 5)
		return 17;	/* 00/00/00 */
	    if (red > 245)	/* ff/ff/ff */
		return 232;
	    for (idx = 0; idx < 23; ++idx)
		if (red < cutoff[idx])
		    return idx + 233;
	    return 256;
	}

	{
	    /* 216-color cube: find the nearest step for each channel. */
	    static int cutoff[5] = {0x2F, 0x73, 0x9B, 0xC3, 0xEB};
	    int ri = 0, gi = 0, bi = 0;

	    while (ri < 5 && red >= cutoff[ri])
		++ri;
	    while (gi < 5 && green >= cutoff[gi])
		++gi;
	    while (bi < 5 && blue >= cutoff[bi])
		++bi;
	    return 17 + ri * 36 + gi * 6 + bi;
	}
    }
    return 0;
}
/*
 * Convert Vterm cell attributes to Vim highlight flags (HL_ values).
 */
    static int
vtermAttr2hl(VTermScreenCellAttrs cellattrs)
{
    int result = 0;

    if (cellattrs.reverse)
	result |= HL_INVERSE;
    if (cellattrs.strike)
	result |= HL_STRIKETHROUGH;
    if (cellattrs.italic)
	result |= HL_ITALIC;
    if (cellattrs.underline)
	result |= HL_UNDERLINE;
    if (cellattrs.bold)
	result |= HL_BOLD;
    return result;
}
/*
 * Store Vterm attributes in "cell" from highlight flags.
 * The inverse of vtermAttr2hl().
 */
    static void
hl2vtermAttr(int attr, cellattr_T *cell)
{
    VTermScreenCellAttrs *ap = &cell->attrs;

    vim_memset(ap, 0, sizeof(VTermScreenCellAttrs));
    ap->bold = (attr & HL_BOLD) != 0;
    ap->underline = (attr & HL_UNDERLINE) != 0;
    ap->italic = (attr & HL_ITALIC) != 0;
    ap->strike = (attr & HL_STRIKETHROUGH) != 0;
    ap->reverse = (attr & HL_INVERSE) != 0;
}
/*
 * Convert the attributes of a vterm cell into an attribute index.
 * Picks the GUI, 'termguicolors' or cterm attribute table depending on how
 * Vim was built and configured.
 */
    static int
cell2attr(VTermScreenCellAttrs cellattrs, VTermColor cellfg, VTermColor cellbg)
{
    int attr = vtermAttr2hl(cellattrs);

#ifdef FEAT_GUI
    if (gui.in_use)
    {
	/* GUI: use the exact RGB values from the cell. */
	guicolor_T fg, bg;

	fg = gui_mch_get_rgb_color(cellfg.red, cellfg.green, cellfg.blue);
	bg = gui_mch_get_rgb_color(cellbg.red, cellbg.green, cellbg.blue);
	return get_gui_attr_idx(attr, fg, bg);
    }
    else
#endif
#ifdef FEAT_TERMGUICOLORS
    if (p_tgc)
    {
	/* 'termguicolors': also keep the exact RGB values. */
	guicolor_T fg, bg;

	fg = gui_get_rgb_color_cmn(cellfg.red, cellfg.green, cellfg.blue);
	bg = gui_get_rgb_color_cmn(cellbg.red, cellbg.green, cellbg.blue);
	return get_tgc_attr_idx(attr, fg, bg);
    }
    else
#endif
    {
	/* cterm: map the RGB values back onto color indexes. */
	int bold = MAYBE;
	int fg = color2index(&cellfg, TRUE, &bold);
	int bg = color2index(&cellbg, FALSE, &bold);

	/* Use the "Terminal" highlighting for the default colors. */
	if ((fg == 0 || bg == 0) && t_colors >= 16)
	{
	    if (fg == 0 && term_default_cterm_fg >= 0)
		fg = term_default_cterm_fg + 1;
	    if (bg == 0 && term_default_cterm_bg >= 0)
		bg = term_default_cterm_bg + 1;
	}

	/* with 8 colors set the bold attribute to get a bright foreground */
	if (bold == TRUE)
	    attr |= HL_BOLD;
	return get_cterm_attr_idx(attr, fg, bg);
    }
    return 0;
}
/*
 * Mark that the scrollback snapshot in the buffer no longer matches the vterm
 * contents.  Unless in Terminal-Normal mode, also arm a timer so the snapshot
 * gets refreshed after output has been idle for a while.
 */
    static void
set_dirty_snapshot(term_T *term)
{
    term->tl_dirty_snapshot = TRUE;
#ifdef FEAT_TIMERS
    if (term->tl_normal_mode)
	return;
    /* Update the snapshot after 100 msec of not getting updates. */
    profile_setlimit(100L, &term->tl_timer_due);
    term->tl_timer_set = TRUE;
#endif
}
/*
 * Called by libvterm when a rectangle of the terminal screen changed.
 * Widens the dirty row range and schedules a partial redraw of the buffer.
 */
    static int
handle_damage(VTermRect rect, void *user)
{
    term_T  *term = (term_T *)user;
    int	    new_start = MIN(term->tl_dirty_row_start, rect.start_row);
    int	    new_end = MAX(term->tl_dirty_row_end, rect.end_row);

    term->tl_dirty_row_start = new_start;
    term->tl_dirty_row_end = new_end;
    set_dirty_snapshot(term);
    redraw_buf_later(term->tl_buffer, SOME_VALID);
    return 1;
}
/*
 * Delete "count" screen lines at "start_row" in every window that shows
 * "term", filling the freed lines with the terminal's default colors.
 */
    static void
term_scroll_up(term_T *term, int start_row, int count)
{
    VTermColor		    fg, bg;
    VTermScreenCellAttrs    attr;
    int			    clear_attr;
    win_T		    *wp;

    /* Compute the attribute used for the cleared lines. */
    vterm_state_get_default_colors(vterm_obtain_state(term->tl_vterm),
								    &fg, &bg);
    vim_memset(&attr, 0, sizeof(attr));
    clear_attr = cell2attr(attr, fg, bg);

    FOR_ALL_WINDOWS(wp)
	if (wp->w_buffer == term->tl_buffer)
	    win_del_lines(wp, start_row, count, FALSE, FALSE, clear_attr);
}
/*
 * Called by libvterm when a rectangle of text was moved, e.g. on scrolling.
 */
    static int
handle_moverect(VTermRect dest, VTermRect src, void *user)
{
    term_T	*term = (term_T *)user;
    int		count = src.start_row - dest.start_row;

    /* Scrolling up is done much more efficiently by deleting lines instead of
     * redrawing the text. But avoid doing this multiple times, postpone until
     * the redraw happens. */
    if (dest.start_col == src.start_col
	    && dest.end_col == src.end_col
	    && dest.start_row < src.start_row)
    {
	if (dest.start_row == 0)
	    term->tl_postponed_scroll += count;
	else
	    term_scroll_up(term, dest.start_row, count);
    }

    term->tl_dirty_row_start = MIN(term->tl_dirty_row_start, dest.start_row);
    /* Widen the dirty range like handle_damage() does.  Using MIN here would
     * shrink the range: after a redraw tl_dirty_row_end is reset to 0 and
     * MIN(0, end_row) would keep it 0, so the moved rows would never be
     * marked dirty. */
    term->tl_dirty_row_end = MAX(term->tl_dirty_row_end, dest.end_row);
    set_dirty_snapshot(term);

    /* Not sure if the scrolling will work correctly, let's do a complete
     * redraw later. */
    redraw_buf_later(term->tl_buffer, NOT_VALID);
    return 1;
}
/*
 * Called by libvterm when the cursor position or visibility changed.
 */
    static int
handle_movecursor(
	VTermPos pos,
	VTermPos oldpos UNUSED,
	int visible,
	void *user)
{
    term_T  *term = (term_T *)user;
    win_T   *wp;

    /* Remember the new state on the terminal. */
    term->tl_cursor_pos = pos;
    term->tl_cursor_visible = visible;

    /* Move the window cursor in every window showing this terminal. */
    FOR_ALL_WINDOWS(wp)
	if (wp->w_buffer == term->tl_buffer)
	    position_cursor(wp, &pos);

    /* Update the physical cursor when this terminal has focus. */
    if (term->tl_buffer == curbuf && !term->tl_normal_mode)
    {
	may_toggle_cursor(term);
	update_cursor(term, term->tl_cursor_visible);
    }
    return 1;
}
/*
 * Called by libvterm when a terminal property changed (title, cursor shape,
 * alternate screen, etc.).
 */
    static int
handle_settermprop(
	VTermProp prop,
	VTermValue *value,
	void *user)
{
    term_T *term = (term_T *)user;

    switch (prop)
    {
	case VTERM_PROP_TITLE:
	    /* Use VIM_CLEAR, not vim_free: the Windows codepage conversion
	     * below may fail and then leave tl_title unassigned; a plain
	     * vim_free() would leave a dangling pointer that is freed again
	     * on the next title change. */
	    VIM_CLEAR(term->tl_title);
	    /* a blank title isn't useful, make it empty, so that "running" is
	     * displayed */
	    if (*skipwhite((char_u *)value->string) == NUL)
		term->tl_title = NULL;
#ifdef WIN3264
	    else if (!enc_utf8 && enc_codepage > 0)
	    {
		/* Convert the UTF-8 title to the active codepage. */
		WCHAR	*ret = NULL;
		int	length = 0;

		MultiByteToWideChar_alloc(CP_UTF8, 0,
			(char*)value->string, (int)STRLEN(value->string),
							       &ret, &length);
		if (ret != NULL)
		{
		    WideCharToMultiByte_alloc(enc_codepage, 0,
			    ret, length, (char**)&term->tl_title,
			    &length, 0, 0);
		    vim_free(ret);
		}
	    }
#endif
	    else
		term->tl_title = vim_strsave((char_u *)value->string);
	    VIM_CLEAR(term->tl_status_text);
	    if (term == curbuf->b_term)
		maketitle();
	    break;

	case VTERM_PROP_CURSORVISIBLE:
	    term->tl_cursor_visible = value->boolean;
	    may_toggle_cursor(term);
	    out_flush();
	    break;

	case VTERM_PROP_CURSORBLINK:
	    term->tl_cursor_blink = value->boolean;
	    may_set_cursor_props(term);
	    break;

	case VTERM_PROP_CURSORSHAPE:
	    term->tl_cursor_shape = value->number;
	    may_set_cursor_props(term);
	    break;

	case VTERM_PROP_CURSORCOLOR:
	    cursor_color_copy(&term->tl_cursor_color, (char_u*)value->string);
	    may_set_cursor_props(term);
	    break;

	case VTERM_PROP_ALTSCREEN:
	    /* TODO: do anything else? */
	    term->tl_using_altscreen = value->boolean;
	    break;

	default:
	    break;
    }
    /* Always return 1, otherwise vterm doesn't store the value internally. */
    return 1;
}
/*
* The job running in the terminal resized the terminal.
*/
static int
handle_resize(int rows, int cols, void *user)
{
term_T *term = (term_T *)user;
win_T *wp;
term->tl_rows = rows;
term->tl_cols = cols;
if (term->tl_vterm_size_changed)
/* Size was set by vterm_set_size(), don't set the window size. */
term->tl_vterm_size_changed = FALSE;
else
{
FOR_ALL_WINDOWS(wp)
{
if (wp->w_buffer == term->tl_buffer)
{
win_setheight_win(rows, wp);
win_setwidth_win(cols, wp);
}
}
redraw_buf_later(term->tl_buffer, NOT_VALID);
}
return 1;
}
/*
 * Handle a line that is pushed off the top of the screen: store it in the
 * scrollback and append its text to the buffer.
 */
    static int
handle_pushline(int cols, const VTermScreenCell *cells, void *user)
{
    term_T	*term = (term_T *)user;

    /* First remove the lines that were appended before, the pushed line goes
     * above it. */
    cleanup_scrollback(term);

    /* If the number of lines that are stored goes over 'termscrollback' then
     * delete the first 10%. */
    if (term->tl_scrollback.ga_len >= term->tl_buffer->b_p_twsl)
    {
	int	todo = term->tl_buffer->b_p_twsl / 10;
	int	i;

	/* ml_delete() works on curbuf, temporarily switch it. */
	curbuf = term->tl_buffer;
	for (i = 0; i < todo; ++i)
	{
	    vim_free(((sb_line_T *)term->tl_scrollback.ga_data + i)->sb_cells);
	    ml_delete(1, FALSE);
	}
	curbuf = curwin->w_buffer;

	term->tl_scrollback.ga_len -= todo;
	mch_memmove(term->tl_scrollback.ga_data,
	    (sb_line_T *)term->tl_scrollback.ga_data + todo,
	    sizeof(sb_line_T) * term->tl_scrollback.ga_len);
	term->tl_scrollback_scrolled -= todo;
    }

    if (ga_grow(&term->tl_scrollback, 1) == OK)
    {
	cellattr_T	*p = NULL;
	int		len = 0;
	int		i;
	int		c;
	int		col;
	sb_line_T	*line;
	garray_T	ga;
	cellattr_T	fill_attr = term->tl_default_color;

	/* do not store empty cells at the end */
	for (i = 0; i < cols; ++i)
	    if (cells[i].chars[0] != 0)
		len = i + 1;
	    else
		cell2cellattr(&cells[i], &fill_attr);

	ga_init2(&ga, 1, 100);
	if (len > 0)
	    p = (cellattr_T *)alloc((int)sizeof(cellattr_T) * len);
	if (p != NULL)
	{
	    for (col = 0; col < len; col += cells[col].width)
	    {
		if (ga_grow(&ga, MB_MAXBYTES) == FAIL)
		{
		    ga.ga_len = 0;
		    break;
		}
		/* Append the character and its composing chars as UTF-8. */
		for (i = 0; (c = cells[col].chars[i]) > 0 || i == 0; ++i)
		    ga.ga_len += utf_char2bytes(c == NUL ? ' ' : c,
					    (char_u *)ga.ga_data + ga.ga_len);
		cell2cellattr(&cells[col], &p[col]);
	    }
	}

	/* Add a NUL-terminated copy of the text to the buffer. */
	if (ga_grow(&ga, 1) == FAIL)
	    add_scrollback_line_to_buffer(term, (char_u *)"", 0);
	else
	{
	    *((char_u *)ga.ga_data + ga.ga_len) = NUL;
	    add_scrollback_line_to_buffer(term, ga.ga_data, ga.ga_len);
	}
	ga_clear(&ga);

	line = (sb_line_T *)term->tl_scrollback.ga_data
						 + term->tl_scrollback.ga_len;
	/* When allocating the cell attributes failed "p" is NULL; store zero
	 * columns then, so that readers (e.g. term_get_attr()) never
	 * dereference a NULL sb_cells. */
	line->sb_cols = p == NULL ? 0 : len;
	line->sb_cells = p;
	line->sb_fill_attr = fill_attr;
	++term->tl_scrollback.ga_len;
	++term->tl_scrollback_scrolled;
    }
    return 0; /* ignored */
}
/* Callbacks invoked by the libvterm screen layer. */
static VTermScreenCallbacks screen_callbacks = {
  handle_damage,	/* damage */
  handle_moverect,	/* moverect */
  handle_movecursor,	/* movecursor */
  handle_settermprop,	/* settermprop */
  NULL,			/* bell */
  handle_resize,	/* resize */
  handle_pushline,	/* sb_pushline */
  NULL			/* sb_popline */
};
/*
 * Do the work after the channel of a terminal was closed.
 * Must be called only when updating_screen is FALSE.
 * Returns TRUE when a buffer was closed (list of terminals may have changed).
 */
    static int
term_after_channel_closed(term_T *term)
{
    /* Unless in Terminal-Normal mode: clear the vterm. */
    if (!term->tl_normal_mode)
    {
	int	fnum = term->tl_buffer->b_fnum;

	cleanup_vterm(term);

	if (term->tl_finish == TL_FINISH_CLOSE)
	{
	    aco_save_T	aco;
	    int		do_set_w_closing = term->tl_buffer->b_nwindows == 0;

	    // ++close or term_finish == "close"
	    ch_log(NULL, "terminal job finished, closing window");
	    aucmd_prepbuf(&aco, term->tl_buffer);
	    // Avoid closing the window if we temporarily use it.
	    if (do_set_w_closing)
		curwin->w_closing = TRUE;
	    /* Wipe out the terminal buffer; this is what may change the
	     * terminal list, hence the TRUE return value. */
	    do_bufdel(DOBUF_WIPE, (char_u *)"", 1, fnum, fnum, FALSE);
	    if (do_set_w_closing)
		curwin->w_closing = FALSE;
	    aucmd_restbuf(&aco);
	    return TRUE;
	}
	if (term->tl_finish == TL_FINISH_OPEN
					  && term->tl_buffer->b_nwindows == 0)
	{
	    char buf[50];

	    /* TODO: use term_opencmd */
	    ch_log(NULL, "terminal job finished, opening window");
	    /* tl_opencmd is used as a printf format with the buffer number;
	     * default is "botright sbuf %d". */
	    vim_snprintf(buf, sizeof(buf),
		    term->tl_opencmd == NULL
			    ? "botright sbuf %d"
			    : (char *)term->tl_opencmd, fnum);
	    do_cmdline_cmd((char_u *)buf);
	}
	else
	    ch_log(NULL, "terminal job finished");
    }

    redraw_buf_and_status_later(term->tl_buffer, NOT_VALID);
    return FALSE;
}
/*
 * Called when a channel has been closed.
 * If this was a channel for a terminal window then finish it up.
 */
    void
term_channel_closed(channel_T *ch)
{
    term_T  *term;
    term_T  *next_term;
    int	    did_one = FALSE;

    /* term_after_channel_closed() may wipe out buffers, which can change the
     * terminal list: iterate with a saved "next" pointer and restart from the
     * head when that happened. */
    for (term = first_term; term != NULL; term = next_term)
    {
	next_term = term->tl_next;
	if (term->tl_job == ch->ch_job)
	{
	    term->tl_channel_closed = TRUE;
	    did_one = TRUE;

	    /* The cached title/status are no longer valid. */
	    VIM_CLEAR(term->tl_title);
	    VIM_CLEAR(term->tl_status_text);
#ifdef WIN3264
	    if (term->tl_out_fd != NULL)
	    {
		fclose(term->tl_out_fd);
		term->tl_out_fd = NULL;
	    }
#endif

	    if (updating_screen)
	    {
		/* Cannot open or close windows now. Can happen when
		 * 'lazyredraw' is set.  Postpone the work, see
		 * term_check_channel_closed_recently(). */
		term->tl_channel_recently_closed = TRUE;
		continue;
	    }

	    if (term_after_channel_closed(term))
		next_term = first_term;
	}
    }

    if (did_one)
    {
	redraw_statuslines();

	/* Need to break out of vgetc(). */
	ins_char_typebuf(K_IGNORE);
	typebuf_was_filled = TRUE;

	term = curbuf->b_term;
	if (term != NULL)
	{
	    if (term->tl_job == ch->ch_job)
		maketitle();
	    update_cursor(term, term->tl_cursor_visible);
	}
    }
}
/*
 * To be called after resetting updating_screen: handle any terminal where the
 * channel was closed while the screen was being updated.
 */
    void
term_check_channel_closed_recently()
{
    term_T  *tp;
    term_T  *tp_next;

    for (tp = first_term; tp != NULL; tp = tp_next)
    {
	tp_next = tp->tl_next;
	if (!tp->tl_channel_recently_closed)
	    continue;
	tp->tl_channel_recently_closed = FALSE;
	if (term_after_channel_closed(tp))
	    /* start over, the list may have changed */
	    tp_next = first_term;
    }
}
/*
 * Fill one screen line from a line of the terminal.
 * Advances "pos" to past the last column.
 */
    static void
term_line2screenline(VTermScreen *screen, VTermPos *pos, int max_col)
{
    int off = screen_get_current_line_off();

    for (pos->col = 0; pos->col < max_col; )
    {
	VTermScreenCell cell;
	int		c;

	/* Treat a cell that cannot be fetched as an empty cell. */
	if (vterm_screen_get_cell(screen, *pos, &cell) == 0)
	    vim_memset(&cell, 0, sizeof(cell));

	c = cell.chars[0];
	if (c == NUL)
	{
	    ScreenLines[off] = ' ';
	    if (enc_utf8)
		ScreenLinesUC[off] = NUL;
	}
	else
	{
	    if (enc_utf8)
	    {
		int i;

		/* composing chars */
		for (i = 0; i < Screen_mco
			       && i + 1 < VTERM_MAX_CHARS_PER_CELL; ++i)
		{
		    ScreenLinesC[i][off] = cell.chars[i + 1];
		    if (cell.chars[i + 1] == 0)
			break;
		}
		/* Non-ASCII characters and characters with composing chars
		 * go into ScreenLinesUC[]; plain ASCII into ScreenLines[]. */
		if (c >= 0x80 || (Screen_mco > 0
					  && ScreenLinesC[0][off] != 0))
		{
		    ScreenLines[off] = ' ';
		    ScreenLinesUC[off] = c;
		}
		else
		{
		    ScreenLines[off] = c;
		    ScreenLinesUC[off] = NUL;
		}
	    }
#ifdef WIN3264
	    else if (has_mbyte && c >= 0x80)
	    {
		/* Convert the Unicode character to the active DBCS codepage;
		 * a two-byte result occupies two screen cells. */
		char_u	mb[MB_MAXBYTES+1];
		WCHAR	wc = c;

		if (WideCharToMultiByte(GetACP(), 0, &wc, 1,
						(char*)mb, 2, 0, 0) > 1)
		{
		    ScreenLines[off] = mb[0];
		    ScreenLines[off + 1] = mb[1];
		    cell.width = mb_ptr2cells(mb);
		}
		else
		    ScreenLines[off] = c;
	    }
#endif
	    else
		ScreenLines[off] = c;
	}
	ScreenAttrs[off] = cell2attr(cell.attrs, cell.fg, cell.bg);

	++pos->col;
	++off;
	if (cell.width == 2)
	{
	    /* Fill the second half of a double-width character. */
	    if (enc_utf8)
		ScreenLinesUC[off] = NUL;

	    /* don't set the second byte to NUL for a DBCS encoding, it
	     * has been set above */
	    if (enc_utf8 || !has_mbyte)
		ScreenLines[off] = NUL;

	    ++pos->col;
	    ++off;
	}
    }
}
#if defined(FEAT_GUI)
/*
 * Update a terminal that draws directly onto the Vim screen, below row
 * tl_toprow, instead of into a window.
 * NOTE(review): presumably used for system commands in the GUI — confirm
 * against the callers of this function.
 */
    static void
update_system_term(term_T *term)
{
    VTermPos	pos;
    VTermScreen	*screen;

    if (term->tl_vterm == NULL)
	return;
    screen = vterm_obtain_screen(term->tl_vterm);

    /* Scroll up to make more room for terminal lines if needed. */
    while (term->tl_toprow > 0
			 && (Rows - term->tl_toprow) < term->tl_dirty_row_end)
    {
	int save_p_more = p_more;

	/* Output a line break at the bottom without triggering the
	 * more-prompt. */
	p_more = FALSE;
	msg_row = Rows - 1;
	msg_puts((char_u *)"\n");
	p_more = save_p_more;
	--term->tl_toprow;
    }

    /* Copy the dirty rows to the screen. */
    for (pos.row = term->tl_dirty_row_start; pos.row < term->tl_dirty_row_end
						 && pos.row < Rows; ++pos.row)
    {
	if (pos.row < term->tl_rows)
	{
	    int max_col = MIN(Columns, term->tl_cols);

	    term_line2screenline(screen, &pos, max_col);
	}
	else
	    pos.col = 0;

	screen_line(term->tl_toprow + pos.row, 0, pos.col, Columns, FALSE);
    }

    /* Nothing is dirty anymore. */
    term->tl_dirty_row_start = MAX_ROW;
    term->tl_dirty_row_end = 0;

    update_cursor(term, TRUE);
}
#endif
/*
 * Return TRUE if window "wp" is to be redrawn with term_update_window().
 * Returns FALSE when there is no terminal running in this window or it is in
 * Terminal-Normal mode.
 */
    int
term_do_update_window(win_T *wp)
{
    term_T *term = wp->w_buffer->b_term;

    if (term == NULL || term->tl_vterm == NULL)
	return FALSE;
    return !term->tl_normal_mode;
}
/*
 * Called to update a window that contains an active terminal.
 */
    void
term_update_window(win_T *wp)
{
    term_T	*term = wp->w_buffer->b_term;
    VTerm	*vterm;
    VTermScreen *screen;
    VTermState	*state;
    VTermPos	pos;
    int		rows, cols;
    int		newrows, newcols;
    int		minsize;
    win_T	*twp;

    vterm = term->tl_vterm;
    screen = vterm_obtain_screen(vterm);
    state = vterm_obtain_state(vterm);

    /* We use NOT_VALID on a resize or scroll, redraw everything then. With
     * SOME_VALID only redraw what was marked dirty. */
    if (wp->w_redr_type > SOME_VALID)
    {
	term->tl_dirty_row_start = 0;
	term->tl_dirty_row_end = MAX_ROW;

	if (term->tl_postponed_scroll > 0
			     && term->tl_postponed_scroll < term->tl_rows / 3)
	    /* Scrolling is usually faster than redrawing, when there are only
	     * a few lines to scroll. */
	    term_scroll_up(term, 0, term->tl_postponed_scroll);
	term->tl_postponed_scroll = 0;
    }

    /*
     * If the window was resized a redraw will be triggered and we get here.
     * Adjust the size of the vterm unless 'termwinsize' specifies a fixed size.
     */
    minsize = parse_termwinsize(wp, &rows, &cols);

    newrows = 99999;
    newcols = 99999;
    FOR_ALL_WINDOWS(twp)
    {
	/* When more than one window shows the same terminal, use the
	 * smallest size. */
	if (twp->w_buffer == term->tl_buffer)
	{
	    newrows = MIN(newrows, twp->w_height);
	    newcols = MIN(newcols, twp->w_width);
	}
    }
    /* Zero from parse_termwinsize() means "follow the window"; with
     * "minsize" the given value is a lower bound, otherwise it is fixed. */
    newrows = rows == 0 ? newrows : minsize ? MAX(rows, newrows) : rows;
    newcols = cols == 0 ? newcols : minsize ? MAX(cols, newcols) : cols;

    if (term->tl_rows != newrows || term->tl_cols != newcols)
    {
	term->tl_vterm_size_changed = TRUE;
	vterm_set_size(vterm, newrows, newcols);
	ch_log(term->tl_job->jv_channel, "Resizing terminal to %d lines",
								     newrows);
	term_report_winsize(term, newrows, newcols);

	// Updating the terminal size will cause the snapshot to be cleared.
	// When not in terminal_loop() we need to restore it.
	if (term != in_terminal_loop)
	    may_move_terminal_to_buffer(term, FALSE);
    }

    /* The cursor may have been moved when resizing. */
    vterm_state_get_cursorpos(state, &pos);
    position_cursor(wp, &pos);

    /* Copy the dirty rows into the window. */
    for (pos.row = term->tl_dirty_row_start; pos.row < term->tl_dirty_row_end
					 && pos.row < wp->w_height; ++pos.row)
    {
	if (pos.row < term->tl_rows)
	{
	    int max_col = MIN(wp->w_width, term->tl_cols);

	    term_line2screenline(screen, &pos, max_col);
	}
	else
	    pos.col = 0;

	screen_line(wp->w_winrow + pos.row
#ifdef FEAT_MENU
				+ winbar_height(wp)
#endif
				, wp->w_wincol, pos.col, wp->w_width, FALSE);
    }
    term->tl_dirty_row_start = MAX_ROW;
    term->tl_dirty_row_end = 0;
}
/*
 * Return TRUE if "buf" is a terminal buffer where the job has finished (the
 * vterm was freed).
 */
    int
term_is_finished(buf_T *buf)
{
    term_T *term = buf->b_term;

    return term != NULL && term->tl_vterm == NULL;
}
/*
 * Return TRUE if "buf" is a terminal buffer where the job has finished or we
 * are in Terminal-Normal mode, thus we show the buffer contents.
 */
    int
term_show_buffer(buf_T *buf)
{
    term_T *term = buf->b_term;

    if (term == NULL)
	return FALSE;
    return term->tl_vterm == NULL || term->tl_normal_mode;
}
/*
 * The current buffer is going to be changed.  If there is terminal
 * highlighting remove it now.
 */
    void
term_change_in_curbuf(void)
{
    term_T *term = curbuf->b_term;

    if (!term_is_finished(curbuf) || term->tl_scrollback.ga_len <= 0)
	return;

    free_scrollback(term);
    redraw_buf_later(term->tl_buffer, NOT_VALID);

    /* The buffer is now like a normal buffer, it cannot be easily
     * abandoned when changed. */
    set_string_option_direct((char_u *)"buftype", -1,
				      (char_u *)"", OPT_FREE|OPT_LOCAL, 0);
}
/*
 * Get the screen attribute for a position in the buffer.
 * Use a negative "col" to get the filler background color.
 */
    int
term_get_attr(buf_T *buf, linenr_T lnum, int col)
{
    term_T	*term = buf->b_term;
    sb_line_T	*line;
    cellattr_T	*cellattr;

    if (lnum > term->tl_scrollback.ga_len)
	/* Beyond the stored scrollback: use the default colors. */
	cellattr = &term->tl_default_color;
    else
    {
	line = (sb_line_T *)term->tl_scrollback.ga_data + lnum - 1;
	/* Also use the fill attribute when no cell attributes were stored
	 * for this line (allocation in handle_pushline() may have failed),
	 * so that sb_cells is never dereferenced when it is NULL. */
	if (col < 0 || col >= line->sb_cols || line->sb_cells == NULL)
	    cellattr = &line->sb_fill_attr;
	else
	    cellattr = line->sb_cells + col;
    }
    return cell2attr(cellattr->attrs, cellattr->fg, cellattr->bg);
}
/*
 * Convert a cterm color number 0 - 255 to RGB.
 * This is compatible with xterm.
 * Fills the red/green/blue channels and the ANSI index of "rgb".
 */
    static void
cterm_color2vterm(int nr, VTermColor *rgb)
{
    cterm_color2rgb(nr, &rgb->red, &rgb->green, &rgb->blue, &rgb->ansi_index);
}
/*
 * Initialize term->tl_default_color from the environment.
 * Order of precedence: the "Terminal" highlight group, then the GUI /
 * 'termguicolors' / cterm normal colors, then black/white depending on
 * 'background'.
 */
    static void
init_default_colors(term_T *term)
{
    VTermColor	    *fg, *bg;
    int		    fgval, bgval;
    int		    id;

    vim_memset(&term->tl_default_color.attrs, 0, sizeof(VTermScreenCellAttrs));
    term->tl_default_color.width = 1;
    fg = &term->tl_default_color.fg;
    bg = &term->tl_default_color.bg;

    /* Vterm uses a default black background. Set it to white when
     * 'background' is "light". */
    if (*p_bg == 'l')
    {
	fgval = 0;
	bgval = 255;
    }
    else
    {
	fgval = 255;
	bgval = 0;
    }
    fg->red = fg->green = fg->blue = fgval;
    bg->red = bg->green = bg->blue = bgval;
    fg->ansi_index = bg->ansi_index = VTERM_ANSI_INDEX_DEFAULT;

    /* The "Terminal" highlight group overrules the defaults. */
    id = syn_name2id((char_u *)"Terminal");

    /* Use the actual color for the GUI and when 'termguicolors' is set. */
#if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS)
    if (0
# ifdef FEAT_GUI
	    || gui.in_use
# endif
# ifdef FEAT_TERMGUICOLORS
	    || p_tgc
# ifdef FEAT_VTP
	    /* Finally get INVALCOLOR on this execution path */
	    || (!p_tgc && t_colors >= 256)
# endif
# endif
       )
    {
	guicolor_T	fg_rgb = INVALCOLOR;
	guicolor_T	bg_rgb = INVALCOLOR;

	if (id != 0)
	    syn_id2colors(id, &fg_rgb, &bg_rgb);

# ifdef FEAT_GUI
	if (gui.in_use)
	{
	    /* Fall back to the GUI's normal fg/bg pixels. */
	    if (fg_rgb == INVALCOLOR)
		fg_rgb = gui.norm_pixel;
	    if (bg_rgb == INVALCOLOR)
		bg_rgb = gui.back_pixel;
	}
# ifdef FEAT_TERMGUICOLORS
	else
# endif
# endif
# ifdef FEAT_TERMGUICOLORS
	{
	    /* Fall back to the 'termguicolors' normal colors. */
	    if (fg_rgb == INVALCOLOR)
		fg_rgb = cterm_normal_fg_gui_color;
	    if (bg_rgb == INVALCOLOR)
		bg_rgb = cterm_normal_bg_gui_color;
	}
# endif

	/* Split the GUI color value into red/green/blue channels. */
	if (fg_rgb != INVALCOLOR)
	{
	    long_u rgb = GUI_MCH_GET_RGB(fg_rgb);

	    fg->red = (unsigned)(rgb >> 16);
	    fg->green = (unsigned)(rgb >> 8) & 255;
	    fg->blue = (unsigned)rgb & 255;
	}
	if (bg_rgb != INVALCOLOR)
	{
	    long_u rgb = GUI_MCH_GET_RGB(bg_rgb);

	    bg->red = (unsigned)(rgb >> 16);
	    bg->green = (unsigned)(rgb >> 8) & 255;
	    bg->blue = (unsigned)rgb & 255;
	}
    }
    else
#endif
    if (id != 0 && t_colors >= 16)
    {
	/* Use the cached cterm colors of the "Terminal" highlight group. */
	if (term_default_cterm_fg >= 0)
	    cterm_color2vterm(term_default_cterm_fg, fg);
	if (term_default_cterm_bg >= 0)
	    cterm_color2vterm(term_default_cterm_bg, bg);
    }
    else
    {
#if defined(WIN3264) && !defined(FEAT_GUI_W32)
	int tmp;
#endif

	/* In an MS-Windows console we know the normal colors. */
	if (cterm_normal_fg_color > 0)
	{
	    cterm_color2vterm(cterm_normal_fg_color - 1, fg);
# if defined(WIN3264) && !defined(FEAT_GUI_W32)
	    /* Swap the red and blue channels for the console. */
	    tmp = fg->red;
	    fg->red = fg->blue;
	    fg->blue = tmp;
# endif
	}
# ifdef FEAT_TERMRESPONSE
	else
	    /* Otherwise ask the terminal for its foreground color. */
	    term_get_fg_color(&fg->red, &fg->green, &fg->blue);
# endif

	if (cterm_normal_bg_color > 0)
	{
	    cterm_color2vterm(cterm_normal_bg_color - 1, bg);
# if defined(WIN3264) && !defined(FEAT_GUI_W32)
	    /* Swap the red and blue channels for the console. */
	    tmp = bg->red;
	    bg->red = bg->blue;
	    bg->blue = tmp;
# endif
	}
# ifdef FEAT_TERMRESPONSE
	else
	    /* Otherwise ask the terminal for its background color. */
	    term_get_bg_color(&bg->red, &bg->green, &bg->blue);
# endif
    }
}
#if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS)
/*
 * Set the 16 ANSI colors of "vterm" from "rgb", an array of 16 packed
 * 0xRRGGBB values.
 */
    static void
set_vterm_palette(VTerm *vterm, long_u *rgb)
{
    VTermState	*state = vterm_obtain_state(vterm);
    int		idx;

    for (idx = 0; idx < 16; ++idx)
    {
	VTermColor color;

	/* Unpack 0xRRGGBB into the separate channels. */
	color.red = (unsigned)(rgb[idx] >> 16);
	color.green = (unsigned)(rgb[idx] >> 8) & 255;
	color.blue = (unsigned)rgb[idx] & 255;
	vterm_state_set_palette_color(state, idx, &color);
    }
}
/*
 * Set the ANSI color palette from a list of exactly 16 color names.
 * Returns FAIL when the list is invalid (wrong length, non-string item or
 * unknown color name), OK otherwise.
 */
    static int
set_ansi_colors_list(VTerm *vterm, list_T *list)
{
    long_u	rgb[16];
    listitem_T	*li;
    int		count = 0;

    for (li = list->lv_first; li != NULL && count < 16;
						    li = li->li_next, ++count)
    {
	char_u		*color_name;
	guicolor_T	guicolor;

	color_name = tv_get_string_chk(&li->li_tv);
	if (color_name == NULL)
	    return FAIL;
	guicolor = GUI_GET_COLOR(color_name);
	if (guicolor == INVALCOLOR)
	    return FAIL;
	rgb[count] = GUI_MCH_GET_RGB(guicolor);
    }

    /* Reject lists that are shorter or longer than 16 entries. */
    if (count != 16 || li != NULL)
	return FAIL;

    set_vterm_palette(vterm, rgb);
    return OK;
}
/*
 * Initialize the ANSI color palette from g:terminal_ansi_colors[0:15].
 * Gives an error when the variable exists but is not a valid list of 16
 * colors; the variable being absent is fine.
 */
    static void
init_vterm_ansi_colors(VTerm *vterm)
{
    dictitem_T *var = find_var((char_u *)"g:terminal_ansi_colors", NULL, TRUE);

    if (var != NULL
	    && (var->di_tv.v_type != VAR_LIST
		|| var->di_tv.vval.v_list == NULL
		|| set_ansi_colors_list(vterm, var->di_tv.vval.v_list) == FAIL))
	EMSG2(_(e_invarg2), "g:terminal_ansi_colors");
}
#endif
/*
 * Handles a "drop" command from the job in the terminal.
 * "item" is the file name, "item->li_next" may have options.
 */
    static void
handle_drop_command(listitem_T *item)
{
    char_u	*fname = tv_get_string(&item->li_tv);
    listitem_T	*opt_item = item->li_next;
    int		bufnr;
    win_T	*wp;
    tabpage_T	*tp;
    exarg_T	ea;
    char_u	*tofree = NULL;

    bufnr = buflist_add(fname, BLN_LISTED | BLN_NOOPT);
    FOR_ALL_TAB_WINDOWS(tp, wp)
    {
	if (wp->w_buffer->b_fnum == bufnr)
	{
	    /* buffer is in a window already, go there */
	    goto_tabpage_win(tp, wp);
	    return;
	}
    }

    vim_memset(&ea, 0, sizeof(ea));

    if (opt_item != NULL && opt_item->li_tv.v_type == VAR_DICT
	    && opt_item->li_tv.vval.v_dict != NULL)
    {
	dict_T *dict = opt_item->li_tv.vval.v_dict;
	char_u *p;

	/* "ff" / "fileformat" option */
	p = dict_get_string(dict, (char_u *)"ff", FALSE);
	if (p == NULL)
	    p = dict_get_string(dict, (char_u *)"fileformat", FALSE);
	if (p != NULL)
	{
	    if (check_ff_value(p) == FAIL)
		ch_log(NULL, "Invalid ff argument to drop: %s", p);
	    else
		ea.force_ff = *p;
	}
	/* "enc" / "encoding" option: build a ":sbuf ++enc=..." command */
	p = dict_get_string(dict, (char_u *)"enc", FALSE);
	if (p == NULL)
	    p = dict_get_string(dict, (char_u *)"encoding", FALSE);
	if (p != NULL)
	{
	    /* 12 = STRLEN("sbuf ++enc=") + trailing NUL */
	    ea.cmd = alloc((int)STRLEN(p) + 12);
	    if (ea.cmd != NULL)
	    {
		sprintf((char *)ea.cmd, "sbuf ++enc=%s", p);
		/* 11 = STRLEN("sbuf ++enc="), NOTE(review): presumably the
		 * offset of the value within ea.cmd — confirm against the
		 * users of force_enc. */
		ea.force_enc = 11;
		tofree = ea.cmd;
	    }
	}
	/* "bad" option for invalid bytes */
	p = dict_get_string(dict, (char_u *)"bad", FALSE);
	if (p != NULL)
	    get_bad_opt(p, &ea);
	/* "bin"/"binary" and "nobin"/"nobinary" options */
	if (dict_find(dict, (char_u *)"bin", -1) != NULL)
	    ea.force_bin = FORCE_BIN;
	if (dict_find(dict, (char_u *)"binary", -1) != NULL)
	    ea.force_bin = FORCE_BIN;
	if (dict_find(dict, (char_u *)"nobin", -1) != NULL)
	    ea.force_bin = FORCE_NOBIN;
	if (dict_find(dict, (char_u *)"nobinary", -1) != NULL)
	    ea.force_bin = FORCE_NOBIN;
    }

    /* open in new window, like ":split fname" */
    if (ea.cmd == NULL)
	ea.cmd = (char_u *)"split";
    ea.arg = fname;
    ea.cmdidx = CMD_split;
    ex_splitview(&ea);

    vim_free(tofree);
}
/*
 * Handles a function call from the job running in a terminal.
 * "item" is the function name, "item->li_next" has the arguments.
 */
    static void
handle_call_command(term_T *term, channel_T *channel, listitem_T *item)
{
    char_u	*func;
    typval_T	argvars[2];
    typval_T	rettv;
    int		doesrange;

    if (item->li_next == NULL)
    {
	ch_log(channel, "Missing function arguments for call");
	return;
    }
    func = tv_get_string(&item->li_tv);

    /* Only function names starting with "Tapi_" are accepted; everything
     * else is rejected. */
    if (STRNCMP(func, "Tapi_", 5) != 0)
    {
	ch_log(channel, "Invalid function name: %s", func);
	return;
    }

    /* The function is called with the terminal buffer number and the
     * argument value from the command. */
    argvars[0].v_type = VAR_NUMBER;
    argvars[0].vval.v_number = term->tl_buffer->b_fnum;
    argvars[1] = item->li_next->li_tv;
    if (call_func(func, (int)STRLEN(func), &rettv,
		2, argvars, /* argv_func */ NULL,
		/* firstline */ 1, /* lastline */ 1,
		&doesrange, /* evaluate */ TRUE,
		/* partial */ NULL, /* selfdict */ NULL) == OK)
    {
	/* The return value is discarded. */
	clear_tv(&rettv);
	ch_log(channel, "Function %s called", func);
    }
    else
	ch_log(channel, "Calling function %s failed", func);
}
/*
 * Called by libvterm when it cannot recognize an OSC sequence.
 * We recognize a terminal API command: OSC 51 with a JSON payload of the
 * form ["drop"|"call", {argument}, ...].
 */
    static int
parse_osc(const char *command, size_t cmdlen, void *user)
{
    term_T	*term = (term_T *)user;
    js_read_T	reader;
    typval_T	tv;
    channel_T	*channel = term->tl_job == NULL ? NULL
						   : term->tl_job->jv_channel;

    /* We recognize only OSC 5 1 ; {command} */
    if (cmdlen < 3 || STRNCMP(command, "51;", 3) != 0)
	return 0; /* not handled */

    /* Copy the payload into a NUL-terminated buffer for the JSON reader. */
    reader.js_buf = vim_strnsave((char_u *)command + 3, (int)(cmdlen - 3));
    if (reader.js_buf == NULL)
	return 1;
    reader.js_fill = NULL;
    reader.js_used = 0;

    if (json_decode(&reader, &tv, 0) == OK
	    && tv.v_type == VAR_LIST
	    && tv.vval.v_list != NULL)
    {
	listitem_T *item = tv.vval.v_list->lv_first;

	if (item == NULL)
	    ch_log(channel, "Missing command");
	else
	{
	    char_u	*cmd = tv_get_string(&item->li_tv);

	    /* Make sure an invoked command doesn't delete the buffer (and the
	     * terminal) under our fingers. */
	    ++term->tl_buffer->b_locked;

	    item = item->li_next;
	    if (item == NULL)
		ch_log(channel, "Missing argument for %s", cmd);
	    else if (STRCMP(cmd, "drop") == 0)
		handle_drop_command(item);
	    else if (STRCMP(cmd, "call") == 0)
		handle_call_command(term, channel, item);
	    else
		ch_log(channel, "Invalid command received: %s", cmd);
	    --term->tl_buffer->b_locked;
	}
    }
    else
	ch_log(channel, "Invalid JSON received");

    vim_free(reader.js_buf);
    clear_tv(&tv);
    return 1;
}
/* Fallback handlers for escape sequences libvterm does not recognize itself;
 * only the OSC handler is used, for the terminal API. */
static VTermParserCallbacks parser_fallbacks = {
  NULL,		/* text */
  NULL,		/* control */
  NULL,		/* escape */
  NULL,		/* csi */
  parse_osc,	/* osc */
  NULL,		/* dcs */
  NULL		/* resize */
};
/*
 * Use Vim's allocation functions for vterm so profiling works.
 */
    static void *
vterm_malloc(size_t size, void *data UNUSED)
{
    /* alloc_clear() returns zero-initialized memory. */
    return alloc_clear((unsigned) size);
}

    static void
vterm_memfree(void *ptr, void *data UNUSED)
{
    vim_free(ptr);
}

/* Allocator callbacks passed to vterm_new_with_allocator(). */
static VTermAllocatorFunctions vterm_allocator = {
  &vterm_malloc,
  &vterm_memfree
};
/*
 * Create a new vterm and initialize it.
 * Return FAIL when out of memory.
 */
    static int
create_vterm(term_T *term, int rows, int cols)
{
    VTerm	*vterm;
    VTermScreen *screen;
    VTermState	*state;
    VTermValue	value;

    vterm = vterm_new_with_allocator(rows, cols, &vterm_allocator, NULL);
    term->tl_vterm = vterm;
    if (vterm == NULL)
	return FAIL;

    // Allocate screen and state here, so we can bail out if that fails.
    state = vterm_obtain_state(vterm);
    screen = vterm_obtain_screen(vterm);
    if (state == NULL || screen == NULL)
    {
	/* Don't leave tl_vterm pointing at the freed vterm; code checking
	 * tl_vterm != NULL would otherwise use freed memory. */
	term->tl_vterm = NULL;
	vterm_free(vterm);
	return FAIL;
    }

    vterm_screen_set_callbacks(screen, &screen_callbacks, term);
    /* TODO: depends on 'encoding'. */
    vterm_set_utf8(vterm, 1);

    init_default_colors(term);

    vterm_state_set_default_colors(
	    state,
	    &term->tl_default_color.fg,
	    &term->tl_default_color.bg);

    /* With 16 or more colors use the "bright" attribute for bold text. */
    if (t_colors >= 16)
	vterm_state_set_bold_highbright(vterm_obtain_state(vterm), 1);

    /* Required to initialize most things. */
    vterm_screen_reset(screen, 1 /* hard */);

    /* Allow using alternate screen. */
    vterm_screen_enable_altscreen(screen, 1);

    /* For unix do not use a blinking cursor. In an xterm this causes the
     * cursor to blink if it's blinking in the xterm.
     * For Windows we respect the system wide setting. */
#ifdef WIN3264
    if (GetCaretBlinkTime() == INFINITE)
	value.boolean = 0;
    else
	value.boolean = 1;
#else
    value.boolean = 0;
#endif
    vterm_state_set_termprop(state, VTERM_PROP_CURSORBLINK, &value);
    vterm_state_set_unrecognised_fallbacks(state, &parser_fallbacks, term);

    return OK;
}
/*
 * Return the text to show for the buffer name and status.
 * The result is cached in tl_status_text; may return NULL when out of
 * memory.
 */
    char_u *
term_get_status_text(term_T *term)
{
    if (term->tl_status_text == NULL)
    {
	char_u	*txt;
	size_t	len;
	char_u	*fname;

	if (term->tl_normal_mode)
	{
	    if (term_job_running(term))
		txt = (char_u *)_("Terminal");
	    else
		txt = (char_u *)_("Terminal-finished");
	}
	else if (term->tl_title != NULL)
	    txt = term->tl_title;
	else if (term_none_open(term))
	    txt = (char_u *)_("active");
	else if (term_job_running(term))
	    txt = (char_u *)_("running");
	else
	    txt = (char_u *)_("finished");

	/* b_fname may be NULL; don't crash on STRLEN(NULL) then. */
	fname = term->tl_buffer->b_fname == NULL
				  ? (char_u *)"" : term->tl_buffer->b_fname;
	len = 9 + STRLEN(fname) + STRLEN(txt);
	term->tl_status_text = alloc((int)len);
	if (term->tl_status_text != NULL)
	    vim_snprintf((char *)term->tl_status_text, len, "%s [%s]",
								  fname, txt);
    }
    return term->tl_status_text;
}
/*
 * Mark references in jobs of terminals.
 * Returns TRUE when setting a reference had to be aborted.
 */
    int
set_ref_in_term(int copyID)
{
    int		abort = FALSE;
    term_T	*tp;
    typval_T	tv;

    for (tp = first_term; tp != NULL; tp = tp->tl_next)
    {
	if (tp->tl_job == NULL)
	    continue;
	tv.v_type = VAR_JOB;
	tv.vval.v_job = tp->tl_job;
	/* Once aborted, do not mark any further items. */
	if (!abort)
	    abort = set_ref_in_item(&tv, copyID, NULL, NULL);
    }
    return abort;
}
/*
 * Cache "Terminal" highlight group colors.
 * The arguments are one-based cterm color numbers; they are stored
 * zero-based (zero meaning "not set" becomes -1).
 */
    void
set_terminal_default_colors(int cterm_fg, int cterm_bg)
{
    term_default_cterm_fg = cterm_fg - 1;
    term_default_cterm_bg = cterm_bg - 1;
}
/*
 * Get the buffer from the first argument in "argvars".
 * Returns NULL when the buffer is not for a terminal window and logs a message
 * with "where".
 */
    static buf_T *
term_get_buf(typval_T *argvars, char *where)
{
    buf_T	*buf;

    (void)tv_get_number(&argvars[0]);	/* issue errmsg if type error */

    /* Look up the buffer with error messages suppressed. */
    ++emsg_off;
    buf = get_buf_tv(&argvars[0], FALSE);
    --emsg_off;

    if (buf != NULL && buf->b_term != NULL)
	return buf;
    ch_log(NULL, "%s: invalid buffer argument", where);
    return NULL;
}
/*
 * Return TRUE when "a" and "b" have the same RGB components and ANSI palette
 * index.
 */
    static int
same_color(VTermColor *a, VTermColor *b)
{
    if (a->red != b->red)
	return FALSE;
    if (a->green != b->green)
	return FALSE;
    if (a->blue != b->blue)
	return FALSE;
    return a->ansi_index == b->ansi_index;
}
/*
 * Write "color" to "fd" in the dump format: six hex digits of RGB followed by
 * the decimal ANSI palette index.
 */
    static void
dump_term_color(FILE *fd, VTermColor *color)
{
    fprintf(fd, "%02x%02x%02x%d",
	    (int)color->red,
	    (int)color->green,
	    (int)color->blue,
	    (int)color->ansi_index);
}
/*
 * "term_dumpwrite(buf, filename, options)" function
 *
 * Each screen cell in full is:
 *    |{characters}+{attributes}#{fg-color}{color-idx}#{bg-color}{color-idx}
 * {characters} is a space for an empty cell
 * For a double-width character "+" is changed to "*" and the next cell is
 * skipped.
 * {attributes} is the decimal value of HL_BOLD + HL_UNDERLINE, etc.
 *    when "&" use the same as the previous cell.
 * {fg-color} is hex RGB, when "&" use the same as the previous cell.
 * {bg-color} is hex RGB, when "&" use the same as the previous cell.
 * {color-idx} is a number from 0 to 255
 *
 * Screen cell with same width, attributes and color as the previous one:
 *    |{characters}
 *
 * To use the color of the previous cell, use "&" instead of {color}-{idx}.
 *
 * Repeating the previous screen cell:
 * @{count}
 *
 * A cell at the cursor position is introduced with ">" instead of "|".
 * Refuses to overwrite an existing file (E953) and requires a live vterm
 * (E958).  The optional dict may limit the area with "rows" and "columns".
 */
    void
f_term_dumpwrite(typval_T *argvars, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_dumpwrite()");
    term_T	*term;
    char_u	*fname;
    int		max_height = 0;	/* 0 means: no row limit */
    int		max_width = 0;	/* 0 means: no column limit */
    stat_T	st;
    FILE	*fd;
    VTermPos	pos;
    VTermScreen *screen;
    VTermScreenCell prev_cell;	/* previous cell, for "&" and "@" compression */
    VTermState	*state;
    VTermPos	cursor_pos;

    if (check_restricted() || check_secure())
	return;
    if (buf == NULL)
	return;
    term = buf->b_term;
    if (term->tl_vterm == NULL)
    {
	EMSG(_("E958: Job already finished"));
	return;
    }

    if (argvars[2].v_type != VAR_UNKNOWN)
    {
	dict_T *d;

	if (argvars[2].v_type != VAR_DICT)
	{
	    EMSG(_(e_dictreq));
	    return;
	}
	d = argvars[2].vval.v_dict;
	if (d != NULL)
	{
	    /* Optional limits on the dumped area. */
	    max_height = dict_get_number(d, (char_u *)"rows");
	    max_width = dict_get_number(d, (char_u *)"columns");
	}
    }

    fname = tv_get_string_chk(&argvars[1]);
    if (fname == NULL)
	return;
    /* Never overwrite an existing file. */
    if (mch_stat((char *)fname, &st) >= 0)
    {
	EMSG2(_("E953: File exists: %s"), fname);
	return;
    }

    if (*fname == NUL || (fd = mch_fopen((char *)fname, WRITEBIN)) == NULL)
    {
	EMSG2(_(e_notcreate), *fname == NUL ? (char_u *)_("<empty>") : fname);
	return;
    }

    vim_memset(&prev_cell, 0, sizeof(prev_cell));

    screen = vterm_obtain_screen(term->tl_vterm);
    state = vterm_obtain_state(term->tl_vterm);
    vterm_state_get_cursorpos(state, &cursor_pos);

    for (pos.row = 0; (max_height == 0 || pos.row < max_height)
					 && pos.row < term->tl_rows; ++pos.row)
    {
	int	repeat = 0;	/* count of identical cells seen in a row */

	for (pos.col = 0; (max_width == 0 || pos.col < max_width)
					 && pos.col < term->tl_cols; ++pos.col)
	{
	    VTermScreenCell cell;
	    int		    same_attr;
	    int		    same_chars = TRUE;
	    int		    i;
	    int		    is_cursor_pos = (pos.col == cursor_pos.col
					     && pos.row == cursor_pos.row);

	    if (vterm_screen_get_cell(screen, pos, &cell) == 0)
		vim_memset(&cell, 0, sizeof(cell));

	    /* Compare the characters with the previous cell. */
	    for (i = 0; i < VTERM_MAX_CHARS_PER_CELL; ++i)
	    {
		int c = cell.chars[i];
		int pc = prev_cell.chars[i];

		/* For the first character NUL is the same as space. */
		if (i == 0)
		{
		    c = (c == NUL) ? ' ' : c;
		    pc = (pc == NUL) ? ' ' : pc;
		}
		if (c != pc)
		    same_chars = FALSE;
		if (c == NUL || pc == NUL)
		    break;
	    }
	    same_attr = vtermAttr2hl(cell.attrs)
					       == vtermAttr2hl(prev_cell.attrs)
			&& same_color(&cell.fg, &prev_cell.fg)
			&& same_color(&cell.bg, &prev_cell.bg);
	    if (same_chars && cell.width == prev_cell.width && same_attr
							     && !is_cursor_pos)
	    {
		/* Completely identical: defer as an "@{count}" repeat. */
		++repeat;
	    }
	    else
	    {
		if (repeat > 0)
		{
		    fprintf(fd, "@%d", repeat);
		    repeat = 0;
		}
		fputs(is_cursor_pos ? ">" : "|", fd);

		if (cell.chars[0] == NUL)
		    fputs(" ", fd);
		else
		{
		    char_u	charbuf[10];
		    int		len;

		    /* Write the (possibly composed) character as UTF-8. */
		    for (i = 0; i < VTERM_MAX_CHARS_PER_CELL
						  && cell.chars[i] != NUL; ++i)
		    {
			len = utf_char2bytes(cell.chars[i], charbuf);
			fwrite(charbuf, len, 1, fd);
		    }
		}

		/* When only the characters differ we don't write anything, the
		 * following "|", "@" or NL will indicate using the same
		 * attributes. */
		if (cell.width != prev_cell.width || !same_attr)
		{
		    if (cell.width == 2)
		    {
			/* double-width: mark with "*" and skip second cell */
			fputs("*", fd);
			++pos.col;
		    }
		    else
			fputs("+", fd);

		    if (same_attr)
		    {
			fputs("&", fd);
		    }
		    else
		    {
			fprintf(fd, "%d", vtermAttr2hl(cell.attrs));
			if (same_color(&cell.fg, &prev_cell.fg))
			    fputs("&", fd);
			else
			{
			    fputs("#", fd);
			    dump_term_color(fd, &cell.fg);
			}
			if (same_color(&cell.bg, &prev_cell.bg))
			    fputs("&", fd);
			else
			{
			    fputs("#", fd);
			    dump_term_color(fd, &cell.bg);
			}
		    }
		}

		prev_cell = cell;
	    }
	}
	if (repeat > 0)
	    fprintf(fd, "@%d", repeat);
	fputs("\n", fd);
    }

    fclose(fd);
}
/*
 * Called when a dump is corrupted. Put a breakpoint here when debugging.
 */
    static void
dump_is_corrupt(garray_T *gap)
{
    /* Append a visible marker so the corruption shows up in the loaded text. */
    ga_concat(gap, (char_u *)"CORRUPT");
}
/*
 * Append a copy of "cell" to the growarray "gap".
 * Silently does nothing when growing the array fails.
 */
    static void
append_cell(garray_T *gap, cellattr_T *cell)
{
    if (ga_grow(gap, 1) != OK)
	return;
    ((cellattr_T *)gap->ga_data)[gap->ga_len++] = *cell;
}
/*
 * Read the dump file from "fd" and append lines to the current buffer.
 * Return the cell width of the longest line.
 *
 * Parses the format written by f_term_dumpwrite():
 *   "|" / ">"  start a cell ( ">" also marks the cursor position),
 *   "+" / "*"  introduce attributes (single/double width),
 *   "#"        introduces an explicit fg/bg color, "&" reuses the previous,
 *   "@{count}" repeats the previous cell,
 *   NL         ends a screen line.
 * "cursor_pos" is set to the cursor found in the dump, or {-1, -1}.
 * On malformed input dump_is_corrupt() inserts a marker and parsing
 * continues best-effort.
 */
    static int
read_dump_file(FILE *fd, VTermPos *cursor_pos)
{
    int		c;
    garray_T	ga_text;	/* bytes of the line being assembled */
    garray_T	ga_cell;	/* cellattr_T per screen cell of that line */
    char_u	*prev_char = NULL;  /* previous cell's character(s), for "@" */
    int		attr = 0;
    cellattr_T	cell;		/* attributes carried over from cell to cell */
    term_T	*term = curbuf->b_term;
    int		max_cells = 0;
    int		start_row = term->tl_scrollback.ga_len;

    ga_init2(&ga_text, 1, 90);
    ga_init2(&ga_cell, sizeof(cellattr_T), 90);
    vim_memset(&cell, 0, sizeof(cell));
    cursor_pos->row = -1;
    cursor_pos->col = -1;

    c = fgetc(fd);
    for (;;)
    {
	if (c == EOF)
	    break;
	if (c == '\r')
	{
	    // DOS line endings?  Ignore.
	    c = fgetc(fd);
	}
	else if (c == '\n')
	{
	    /* End of a line: append it to the buffer. */
	    if (ga_text.ga_data == NULL)
		dump_is_corrupt(&ga_text);
	    if (ga_grow(&term->tl_scrollback, 1) == OK)
	    {
		sb_line_T *line = (sb_line_T *)term->tl_scrollback.ga_data
						  + term->tl_scrollback.ga_len;

		if (max_cells < ga_cell.ga_len)
		    max_cells = ga_cell.ga_len;
		/* The scrollback line takes over ownership of ga_cell's data. */
		line->sb_cols = ga_cell.ga_len;
		line->sb_cells = ga_cell.ga_data;
		line->sb_fill_attr = term->tl_default_color;
		++term->tl_scrollback.ga_len;
		ga_init(&ga_cell);

		ga_append(&ga_text, NUL);
		ml_append(curbuf->b_ml.ml_line_count, ga_text.ga_data,
							ga_text.ga_len, FALSE);
	    }
	    else
		ga_clear(&ga_cell);
	    ga_text.ga_len = 0;

	    c = fgetc(fd);
	}
	else if (c == '|' || c == '>')
	{
	    int prev_len = ga_text.ga_len;

	    if (c == '>')
	    {
		if (cursor_pos->row != -1)
		    dump_is_corrupt(&ga_text);	/* duplicate cursor */
		cursor_pos->row = term->tl_scrollback.ga_len - start_row;
		cursor_pos->col = ga_cell.ga_len;
	    }

	    /* normal character(s) followed by "+", "*", "|", "@" or NL */
	    c = fgetc(fd);
	    if (c != EOF)
		ga_append(&ga_text, c);
	    for (;;)
	    {
		c = fgetc(fd);
		if (c == '+' || c == '*' || c == '|' || c == '>' || c == '@'
						      || c == EOF || c == '\n')
		    break;
		ga_append(&ga_text, c);
	    }

	    /* save the character for repeating it */
	    vim_free(prev_char);
	    if (ga_text.ga_data != NULL)
		prev_char = vim_strnsave(((char_u *)ga_text.ga_data) + prev_len,
						    ga_text.ga_len - prev_len);

	    if (c == '@' || c == '|' || c == '>' || c == '\n')
	    {
		/* use all attributes from previous cell */
	    }
	    else if (c == '+' || c == '*')
	    {
		int is_bg;

		cell.width = c == '+' ? 1 : 2;

		c = fgetc(fd);
		if (c == '&')
		{
		    /* use same attr as previous cell */
		    c = fgetc(fd);
		}
		else if (isdigit(c))
		{
		    /* get the decimal attribute */
		    attr = 0;
		    while (isdigit(c))
		    {
			attr = attr * 10 + (c - '0');
			c = fgetc(fd);
		    }
		    hl2vtermAttr(attr, &cell);
		}
		else
		    dump_is_corrupt(&ga_text);

		/* is_bg == 0: fg, is_bg == 1: bg */
		for (is_bg = 0; is_bg <= 1; ++is_bg)
		{
		    if (c == '&')
		    {
			/* use same color as previous cell */
			c = fgetc(fd);
		    }
		    else if (c == '#')
		    {
			int red, green, blue, index = 0;

			/* six hex digits of RGB, then the decimal index */
			c = fgetc(fd);
			red = hex2nr(c);
			c = fgetc(fd);
			red = (red << 4) + hex2nr(c);
			c = fgetc(fd);
			green = hex2nr(c);
			c = fgetc(fd);
			green = (green << 4) + hex2nr(c);
			c = fgetc(fd);
			blue = hex2nr(c);
			c = fgetc(fd);
			blue = (blue << 4) + hex2nr(c);
			c = fgetc(fd);
			if (!isdigit(c))
			    dump_is_corrupt(&ga_text);
			while (isdigit(c))
			{
			    index = index * 10 + (c - '0');
			    c = fgetc(fd);
			}

			if (is_bg)
			{
			    cell.bg.red = red;
			    cell.bg.green = green;
			    cell.bg.blue = blue;
			    cell.bg.ansi_index = index;
			}
			else
			{
			    cell.fg.red = red;
			    cell.fg.green = green;
			    cell.fg.blue = blue;
			    cell.fg.ansi_index = index;
			}
		    }
		    else
			dump_is_corrupt(&ga_text);
		}
	    }
	    else
		dump_is_corrupt(&ga_text);

	    append_cell(&ga_cell, &cell);
	}
	else if (c == '@')
	{
	    if (prev_char == NULL)
		dump_is_corrupt(&ga_text);
	    else
	    {
		int count = 0;

		/* repeat previous character, get the count */
		for (;;)
		{
		    c = fgetc(fd);
		    if (!isdigit(c))
			break;
		    count = count * 10 + (c - '0');
		}

		while (count-- > 0)
		{
		    ga_concat(&ga_text, prev_char);
		    append_cell(&ga_cell, &cell);
		}
	    }
	}
	else
	{
	    dump_is_corrupt(&ga_text);
	    c = fgetc(fd);
	}
    }

    if (ga_text.ga_len > 0)
    {
	/* trailing characters after last NL */
	dump_is_corrupt(&ga_text);
	ga_append(&ga_text, NUL);
	ml_append(curbuf->b_ml.ml_line_count, ga_text.ga_data,
							ga_text.ga_len, FALSE);
    }

    ga_clear(&ga_text);
    vim_free(prev_char);

    return max_cells;
}
/*
 * Return an allocated string with at least "text_width" "=" characters and
 * "fname" inserted in the middle.
 * When "fname" does not fit, only its tail is used and leading characters are
 * skipped until it does.  Returns NULL when out of memory.
 */
    static char_u *
get_separator(int text_width, char_u *fname)
{
    int		width = MAX(text_width, curwin->w_width);
    char_u	*textline;
    int		fname_size;
    char_u	*p = fname;
    int		i;
    size_t	off;

    /* Allocated size covers the worst case: full width plus the whole name. */
    textline = alloc(width + (int)STRLEN(fname) + 1);
    if (textline == NULL)
	return NULL;

    fname_size = vim_strsize(fname);
    if (fname_size < width - 8)
    {
	/* enough room, don't use the full window width */
	width = MAX(text_width, fname_size + 8);
    }
    else if (fname_size > width - 8)
    {
	/* full name doesn't fit, use only the tail */
	p = gettail(fname);
	fname_size = vim_strsize(p);
    }
    /* skip characters until the name fits */
    while (fname_size > width - 8)
    {
	p += (*mb_ptr2len)(p);
	fname_size = vim_strsize(p);
    }

    /* left half of "=" padding, a space, the name, a space, right half */
    for (i = 0; i < (width - fname_size) / 2 - 1; ++i)
	textline[i] = '=';
    textline[i++] = ' ';

    STRCPY(textline + i, p);
    off = STRLEN(textline);
    textline[off] = ' ';
    for (i = 1; i < (width - fname_size) / 2; ++i)
	textline[off + i] = '=';
    textline[off + i] = NUL;

    return textline;
}
/*
 * Common for "term_dumpdiff()" and "term_dumpload()".
 * Opens the dump file(s), creates a terminal buffer without a job and fills
 * it with the dump contents.  With "do_diff" a second dump is appended below
 * a separator line and a per-cell difference line is added between them.
 * On success "rettv" is set to the new buffer number.
 *
 * Fix over the previous version: "line1" was freed before the trailing
 * "while (col < width)" loop, which still dereferences "p1" pointing into
 * "line1" — a use-after-free.  The free now happens after that loop.
 */
    static void
term_load_dump(typval_T *argvars, typval_T *rettv, int do_diff)
{
    jobopt_T	opt;
    buf_T	*buf;
    char_u	buf1[NUMBUFLEN];
    char_u	buf2[NUMBUFLEN];
    char_u	*fname1;
    char_u	*fname2 = NULL;
    char_u	*fname_tofree = NULL;
    FILE	*fd1;
    FILE	*fd2 = NULL;
    char_u	*textline = NULL;

    /* First open the files.  If this fails bail out. */
    fname1 = tv_get_string_buf_chk(&argvars[0], buf1);
    if (do_diff)
	fname2 = tv_get_string_buf_chk(&argvars[1], buf2);
    if (fname1 == NULL || (do_diff && fname2 == NULL))
    {
	EMSG(_(e_invarg));
	return;
    }
    fd1 = mch_fopen((char *)fname1, READBIN);
    if (fd1 == NULL)
    {
	EMSG2(_(e_notread), fname1);
	return;
    }
    if (do_diff)
    {
	fd2 = mch_fopen((char *)fname2, READBIN);
	if (fd2 == NULL)
	{
	    fclose(fd1);
	    EMSG2(_(e_notread), fname2);
	    return;
	}
    }

    init_job_options(&opt);
    if (argvars[do_diff ? 2 : 1].v_type != VAR_UNKNOWN
	    && get_job_options(&argvars[do_diff ? 2 : 1], &opt, 0,
		    JO2_TERM_NAME + JO2_TERM_COLS + JO2_TERM_ROWS
		    + JO2_VERTICAL + JO2_CURWIN + JO2_NORESTORE) == FAIL)
	goto theend;

    if (opt.jo_term_name == NULL)
    {
	/* Default buffer name: "dump diff {fname1}". */
	size_t len = STRLEN(fname1) + 12;

	fname_tofree = alloc((int)len);
	if (fname_tofree != NULL)
	{
	    vim_snprintf((char *)fname_tofree, len, "dump diff %s", fname1);
	    opt.jo_term_name = fname_tofree;
	}
    }

    buf = term_start(&argvars[0], NULL, &opt, TERM_START_NOJOB);
    if (buf != NULL && buf->b_term != NULL)
    {
	int		i;
	linenr_T	bot_lnum;
	linenr_T	lnum;
	term_T		*term = buf->b_term;
	int		width;
	int		width2;
	VTermPos	cursor_pos1;
	VTermPos	cursor_pos2;

	init_default_colors(term);

	rettv->vval.v_number = buf->b_fnum;

	/* read the files, fill the buffer with the diff */
	width = read_dump_file(fd1, &cursor_pos1);

	/* position the cursor */
	if (cursor_pos1.row >= 0)
	{
	    curwin->w_cursor.lnum = cursor_pos1.row + 1;
	    coladvance(cursor_pos1.col);
	}

	/* Delete the empty line that was in the empty buffer. */
	ml_delete(1, FALSE);

	/* For term_dumpload() we are done here. */
	if (!do_diff)
	    goto theend;

	term->tl_top_diff_rows = curbuf->b_ml.ml_line_count;

	textline = get_separator(width, fname1);
	if (textline == NULL)
	    goto theend;
	if (add_empty_scrollback(term, &term->tl_default_color, 0) == OK)
	    ml_append(curbuf->b_ml.ml_line_count, textline, 0, FALSE);
	vim_free(textline);

	textline = get_separator(width, fname2);
	if (textline == NULL)
	    goto theend;
	if (add_empty_scrollback(term, &term->tl_default_color, 0) == OK)
	    ml_append(curbuf->b_ml.ml_line_count, textline, 0, FALSE);
	textline[width] = NUL;

	bot_lnum = curbuf->b_ml.ml_line_count;
	width2 = read_dump_file(fd2, &cursor_pos2);
	if (width2 > width)
	{
	    /* second dump is wider: re-allocate the difference line */
	    vim_free(textline);
	    textline = alloc(width2 + 1);
	    if (textline == NULL)
		goto theend;
	    width = width2;
	    textline[width] = NUL;
	}
	term->tl_bot_diff_rows = curbuf->b_ml.ml_line_count - bot_lnum;

	for (lnum = 1; lnum <= term->tl_top_diff_rows; ++lnum)
	{
	    if (lnum + bot_lnum > curbuf->b_ml.ml_line_count)
	    {
		/* bottom part has fewer rows, fill with "-" */
		for (i = 0; i < width; ++i)
		    textline[i] = '-';
	    }
	    else
	    {
		char_u	    *line1;
		char_u	    *line2;
		char_u	    *p1;
		char_u	    *p2;
		int	    col;
		sb_line_T   *sb_line = (sb_line_T *)term->tl_scrollback.ga_data;
		cellattr_T  *cellattr1 = (sb_line + lnum - 1)->sb_cells;
		cellattr_T  *cellattr2 = (sb_line + lnum + bot_lnum - 1)
								    ->sb_cells;

		/* Make a copy, getting the second line will invalidate it. */
		line1 = vim_strsave(ml_get(lnum));
		if (line1 == NULL)
		    break;
		p1 = line1;

		line2 = ml_get(lnum + bot_lnum);
		p2 = line2;
		for (col = 0; col < width && *p1 != NUL && *p2 != NUL; ++col)
		{
		    int len1 = utfc_ptr2len(p1);
		    int len2 = utfc_ptr2len(p2);

		    textline[col] = ' ';
		    if (len1 != len2 || STRNCMP(p1, p2, len1) != 0)
			/* text differs */
			textline[col] = 'X';
		    else if (lnum == cursor_pos1.row + 1
			    && col == cursor_pos1.col
			    && (cursor_pos1.row != cursor_pos2.row
					|| cursor_pos1.col != cursor_pos2.col))
			/* cursor in first but not in second */
			textline[col] = '>';
		    else if (lnum == cursor_pos2.row + 1
			    && col == cursor_pos2.col
			    && (cursor_pos1.row != cursor_pos2.row
					|| cursor_pos1.col != cursor_pos2.col))
			/* cursor in second but not in first */
			textline[col] = '<';
		    else if (cellattr1 != NULL && cellattr2 != NULL)
		    {
			if ((cellattr1 + col)->width
						   != (cellattr2 + col)->width)
			    textline[col] = 'w';
			else if (!same_color(&(cellattr1 + col)->fg,
						     &(cellattr2 + col)->fg))
			    textline[col] = 'f';
			else if (!same_color(&(cellattr1 + col)->bg,
						     &(cellattr2 + col)->bg))
			    textline[col] = 'b';
			else if (vtermAttr2hl((cellattr1 + col)->attrs)
				   != vtermAttr2hl(((cellattr2 + col)->attrs)))
			    textline[col] = 'a';
		    }
		    p1 += len1;
		    p2 += len2;
		    /* TODO: handle different width */
		}

		while (col < width)
		{
		    if (*p1 == NUL && *p2 == NUL)
			textline[col] = '?';
		    else if (*p1 == NUL)
		    {
			textline[col] = '+';
			p2 += utfc_ptr2len(p2);
		    }
		    else
		    {
			textline[col] = '-';
			p1 += utfc_ptr2len(p1);
		    }
		    ++col;
		}
		/* Only free "line1" now: "p1" above still points into it. */
		vim_free(line1);
	    }

	    if (add_empty_scrollback(term, &term->tl_default_color,
						 term->tl_top_diff_rows) == OK)
		ml_append(term->tl_top_diff_rows + lnum, textline, 0, FALSE);
	    ++bot_lnum;
	}

	while (lnum + bot_lnum <= curbuf->b_ml.ml_line_count)
	{
	    /* bottom part has more rows, fill with "+" */
	    for (i = 0; i < width; ++i)
		textline[i] = '+';
	    if (add_empty_scrollback(term, &term->tl_default_color,
						 term->tl_top_diff_rows) == OK)
		ml_append(term->tl_top_diff_rows + lnum, textline, 0, FALSE);
	    ++lnum;
	    ++bot_lnum;
	}

	term->tl_cols = width;

	/* looks better without wrapping */
	curwin->w_p_wrap = 0;
    }

theend:
    vim_free(textline);
    vim_free(fname_tofree);
    fclose(fd1);
    if (fd2 != NULL)
	fclose(fd2);
}
/*
 * If the current buffer shows the output of term_dumpdiff(), swap the top and
 * bottom files.
 * Return FAIL when this is not possible.
 */
    int
term_swap_diff()
{
    term_T	*term = curbuf->b_term;
    linenr_T	line_count;
    linenr_T	top_rows;
    linenr_T	bot_rows;
    linenr_T	bot_start;	/* line above the bottom part */
    linenr_T	lnum;
    char_u	*p;
    sb_line_T	*sb_line;

    /* Only valid for a finished terminal showing a dump diff. */
    if (term == NULL
	    || !term_is_finished(curbuf)
	    || term->tl_top_diff_rows == 0
	    || term->tl_scrollback.ga_len == 0)
	return FAIL;

    line_count = curbuf->b_ml.ml_line_count;
    top_rows = term->tl_top_diff_rows;
    bot_rows = term->tl_bot_diff_rows;
    bot_start = line_count - bot_rows;
    sb_line = (sb_line_T *)term->tl_scrollback.ga_data;

    /* move lines from top to above the bottom part */
    for (lnum = 1; lnum <= top_rows; ++lnum)
    {
	p = vim_strsave(ml_get(1));
	if (p == NULL)
	    return OK;	/* out of memory: give up but buffer stays usable */
	ml_append(bot_start, p, 0, FALSE);
	ml_delete(1, FALSE);
	vim_free(p);
    }

    /* move lines from bottom to the top */
    for (lnum = 1; lnum <= bot_rows; ++lnum)
    {
	p = vim_strsave(ml_get(bot_start + lnum));
	if (p == NULL)
	    return OK;
	ml_delete(bot_start + lnum, FALSE);
	ml_append(lnum - 1, p, 0, FALSE);
	vim_free(p);
    }

    if (top_rows == bot_rows)
    {
	/* rows counts are equal, can swap cell properties */
	for (lnum = 0; lnum < top_rows; ++lnum)
	{
	    sb_line_T	temp;

	    temp = *(sb_line + lnum);
	    *(sb_line + lnum) = *(sb_line + bot_start + lnum);
	    *(sb_line + bot_start + lnum) = temp;
	}
    }
    else
    {
	size_t	    size = sizeof(sb_line_T) * term->tl_scrollback.ga_len;
	sb_line_T   *temp = (sb_line_T *)alloc((int)size);

	/* need to copy cell properties into temp memory */
	if (temp != NULL)
	{
	    /* rotate: bottom rows first, then the middle, then the top rows */
	    mch_memmove(temp, term->tl_scrollback.ga_data, size);
	    mch_memmove(term->tl_scrollback.ga_data,
		    temp + bot_start,
		    sizeof(sb_line_T) * bot_rows);
	    mch_memmove((sb_line_T *)term->tl_scrollback.ga_data + bot_rows,
		    temp + top_rows,
		    sizeof(sb_line_T) * (line_count - top_rows - bot_rows));
	    mch_memmove((sb_line_T *)term->tl_scrollback.ga_data
						      + line_count - top_rows,
		    temp,
		    sizeof(sb_line_T) * top_rows);
	    vim_free(temp);
	}
    }

    term->tl_top_diff_rows = bot_rows;
    term->tl_bot_diff_rows = top_rows;

    update_screen(NOT_VALID);
    return OK;
}
/*
 * "term_dumpdiff(filename, filename, options)" function
 */
    void
f_term_dumpdiff(typval_T *argvars, typval_T *rettv)
{
    /* TRUE: load both dumps and show the differences. */
    term_load_dump(argvars, rettv, TRUE);
}
/*
 * "term_dumpload(filename, options)" function
 */
    void
f_term_dumpload(typval_T *argvars, typval_T *rettv)
{
    /* FALSE: load a single dump, no diff. */
    term_load_dump(argvars, rettv, FALSE);
}
/*
 * "term_getaltscreen(buf)" function
 * Returns non-zero when the terminal is using the alternate screen.
 */
    void
f_term_getaltscreen(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getaltscreen()");

    if (buf != NULL)
	rettv->vval.v_number = buf->b_term->tl_using_altscreen;
}
/*
 * "term_getattr(attr, name)" function
 * Returns 1 when the named attribute bit is set in "attr", 0 otherwise.
 */
    void
f_term_getattr(typval_T *argvars, typval_T *rettv)
{
    /* Mapping from attribute name to its HL_ flag. */
    static struct {
	char	*name;
	int	attr;
    } attrs[] = {
	{"bold",      HL_BOLD},
	{"italic",    HL_ITALIC},
	{"underline", HL_UNDERLINE},
	{"strike",    HL_STRIKETHROUGH},
	{"reverse",   HL_INVERSE},
    };
    int		attr;
    char_u	*name;
    size_t	i;

    attr = tv_get_number(&argvars[0]);
    name = tv_get_string_chk(&argvars[1]);
    if (name == NULL)
	return;

    for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); ++i)
	if (STRCMP(name, attrs[i].name) == 0)
	{
	    rettv->vval.v_number = (attr & attrs[i].attr) != 0 ? 1 : 0;
	    break;
	}
}
/*
 * "term_getcursor(buf)" function
 * Returns [row, col, {details}] with one-based cursor position and a dict
 * with "visible", "blink", "shape" and "color" entries.
 */
    void
f_term_getcursor(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getcursor()");
    term_T	*term;
    list_T	*l;
    dict_T	*d;

    if (rettv_list_alloc(rettv) == FAIL)
	return;
    if (buf == NULL)
	return;
    term = buf->b_term;

    l = rettv->vval.v_list;
    list_append_number(l, term->tl_cursor_pos.row + 1);
    list_append_number(l, term->tl_cursor_pos.col + 1);

    d = dict_alloc();
    if (d == NULL)
	return;
    dict_add_number(d, "visible", term->tl_cursor_visible);
    /* Report the effective blink state, taking inversion into account. */
    dict_add_number(d, "blink", blink_state_is_inverted()
			   ? !term->tl_cursor_blink : term->tl_cursor_blink);
    dict_add_number(d, "shape", term->tl_cursor_shape);
    dict_add_string(d, "color", cursor_color_get(term->tl_cursor_color));
    list_append_dict(l, d);
}
/*
 * "term_getjob(buf)" function
 * Returns the job of the terminal buffer, or v:null when there is none.
 */
    void
f_term_getjob(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getjob()");
    job_T	*job;

    if (buf == NULL)
    {
	/* Not a terminal buffer: return v:null. */
	rettv->v_type = VAR_SPECIAL;
	rettv->vval.v_number = VVAL_NULL;
	return;
    }

    job = buf->b_term->tl_job;
    rettv->v_type = VAR_JOB;
    rettv->vval.v_job = job;
    if (job != NULL)
	++job->jv_refcount;	/* returned value holds a reference */
}
/*
 * Get a zero-based row number from "tv": "." means the cursor row, otherwise
 * the one-based number in "tv" is converted.
 */
    static int
get_row_number(typval_T *tv, term_T *term)
{
    char_u	*s;

    if (tv->v_type == VAR_STRING)
    {
	s = tv->vval.v_string;
	if (s != NULL && STRCMP(s, ".") == 0)
	    return term->tl_cursor_pos.row;
    }
    return (int)tv_get_number(tv) - 1;
}
/*
 * "term_getline(buf, row)" function
 * Returns the text of one terminal screen line, empty string when out of
 * range or out of memory.
 */
    void
f_term_getline(typval_T *argvars, typval_T *rettv)
{
    buf_T	    *buf = term_get_buf(argvars, "term_getline()");
    term_T	    *term;
    int		    row;

    rettv->v_type = VAR_STRING;
    if (buf == NULL)
	return;
    term = buf->b_term;
    row = get_row_number(&argvars[1], term);

    if (term->tl_vterm == NULL)
    {
	linenr_T lnum = row + term->tl_scrollback_scrolled + 1;

	/* vterm is finished, get the text from the buffer */
	if (lnum > 0 && lnum <= buf->b_ml.ml_line_count)
	    rettv->vval.v_string = vim_strsave(ml_get_buf(buf, lnum, FALSE));
    }
    else
    {
	VTermScreen	*screen = vterm_obtain_screen(term->tl_vterm);
	VTermRect	rect;
	int		len;
	char_u		*p;

	if (row < 0 || row >= term->tl_rows)
	    return;
	/* Worst case: every cell holds a maximum-length multi-byte char. */
	len = term->tl_cols * MB_MAXBYTES + 1;
	p = alloc(len);
	if (p == NULL)
	    return;
	rettv->vval.v_string = p;

	rect.start_col = 0;
	rect.end_col = term->tl_cols;
	rect.start_row = row;
	rect.end_row = row + 1;
	/* vterm_screen_get_text() returns the number of bytes written;
	 * terminate the string right after them. */
	p[vterm_screen_get_text(screen, (char *)p, len, rect)] = NUL;
    }
}
/*
 * "term_getscrolled(buf)" function
 * Returns the number of lines that scrolled out of the terminal screen.
 */
    void
f_term_getscrolled(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getscrolled()");

    if (buf != NULL)
	rettv->vval.v_number = buf->b_term->tl_scrollback_scrolled;
}
/*
 * "term_getsize(buf)" function
 * Returns [rows, cols] of the terminal, or an empty list for a non-terminal
 * buffer.
 */
    void
f_term_getsize(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getsize()");
    list_T	*l;

    if (rettv_list_alloc(rettv) == FAIL || buf == NULL)
	return;
    l = rettv->vval.v_list;
    list_append_number(l, buf->b_term->tl_rows);
    list_append_number(l, buf->b_term->tl_cols);
}
/*
 * "term_setsize(buf, rows, cols)" function
 * Resizes the terminal; a zero or negative value keeps the current size for
 * that dimension.  Does nothing when the terminal has already ended.
 */
    void
f_term_setsize(typval_T *argvars UNUSED, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_setsize()");
    term_T	*term;
    varnumber_T rows, cols;

    if (buf == NULL)
    {
	EMSG(_("E955: Not a terminal buffer"));
	return;
    }
    /* Cannot resize a finished terminal. */
    if (buf->b_term->tl_vterm == NULL)
	return;
    term = buf->b_term;
    rows = tv_get_number(&argvars[1]);
    rows = rows <= 0 ? term->tl_rows : rows;	/* <= 0: keep current rows */
    cols = tv_get_number(&argvars[2]);
    cols = cols <= 0 ? term->tl_cols : cols;	/* <= 0: keep current cols */
    vterm_set_size(term->tl_vterm, rows, cols);
    /* handle_resize() will resize the windows */

    /* Get and remember the size we ended up with.  Update the pty. */
    vterm_get_size(term->tl_vterm, &term->tl_rows, &term->tl_cols);
    term_report_winsize(term, term->tl_rows, term->tl_cols);
}
/*
 * "term_getstatus(buf)" function
 * Returns "running" or "finished", with ",normal" appended when in Terminal-
 * Normal mode.
 */
    void
f_term_getstatus(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getstatus()");
    term_T	*term;
    char_u	val[100];

    rettv->v_type = VAR_STRING;
    if (buf == NULL)
	return;
    term = buf->b_term;

    STRCPY(val, term_job_running(term) ? "running" : "finished");
    if (term->tl_normal_mode)
	STRCAT(val, ",normal");
    rettv->vval.v_string = vim_strsave(val);
}
/*
 * "term_gettitle(buf)" function
 * Returns the title set by the job, empty when there is none.
 */
    void
f_term_gettitle(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_gettitle()");

    rettv->v_type = VAR_STRING;
    if (buf == NULL || buf->b_term->tl_title == NULL)
	return;
    rettv->vval.v_string = vim_strsave(buf->b_term->tl_title);
}
/*
 * "term_gettty(buf)" function
 * Second argument selects the tty: 0 (default) for output, 1 for input.
 * Any other value gives an error.
 */
    void
f_term_gettty(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_gettty()");
    char_u	*name = NULL;
    int		which = 0;

    rettv->v_type = VAR_STRING;
    if (buf == NULL)
	return;
    if (argvars[1].v_type != VAR_UNKNOWN)
	which = tv_get_number(&argvars[1]);
    if (which != 0 && which != 1)
    {
	EMSG2(_(e_invarg2), tv_get_string(&argvars[1]));
	return;
    }
    if (buf->b_term->tl_job != NULL)
	name = which == 0 ? buf->b_term->tl_job->jv_tty_out
			  : buf->b_term->tl_job->jv_tty_in;
    if (name != NULL)
	rettv->vval.v_string = vim_strsave(name);
}
/*
 * "term_list()" function
 * Returns a list with the buffer numbers of all terminal buffers.
 */
    void
f_term_list(typval_T *argvars UNUSED, typval_T *rettv)
{
    term_T	*tp;
    list_T	*l;

    if (rettv_list_alloc(rettv) == FAIL || first_term == NULL)
	return;

    l = rettv->vval.v_list;
    for (tp = first_term; tp != NULL; tp = tp->tl_next)
    {
	if (tp->tl_buffer == NULL)
	    continue;
	if (list_append_number(l, (varnumber_T)tp->tl_buffer->b_fnum) == FAIL)
	    return;	/* out of memory */
    }
}
/*
 * "term_scrape(buf, row)" function
 * Returns a list of dicts, one per screen cell of the row, with "chars",
 * "fg", "bg", "attr" and "width" entries.  Uses the live vterm screen when
 * the job is running, otherwise the saved scrollback cells.
 */
    void
f_term_scrape(typval_T *argvars, typval_T *rettv)
{
    buf_T	    *buf = term_get_buf(argvars, "term_scrape()");
    VTermScreen	    *screen = NULL;
    VTermPos	    pos;
    list_T	    *l;
    term_T	    *term;
    char_u	    *p;
    sb_line_T	    *line;

    if (rettv_list_alloc(rettv) == FAIL)
	return;
    if (buf == NULL)
	return;
    term = buf->b_term;

    l = rettv->vval.v_list;
    pos.row = get_row_number(&argvars[1], term);
    if (term->tl_vterm != NULL)
    {
	/* Terminal still running: read cells from the vterm screen. */
	screen = vterm_obtain_screen(term->tl_vterm);
	p = NULL;
	line = NULL;
    }
    else
    {
	/* Terminal finished: read from the buffer text and scrollback. */
	linenr_T lnum = pos.row + term->tl_scrollback_scrolled;

	if (lnum < 0 || lnum >= term->tl_scrollback.ga_len)
	    return;
	p = ml_get_buf(buf, lnum + 1, FALSE);
	line = (sb_line_T *)term->tl_scrollback.ga_data + lnum;
    }

    for (pos.col = 0; pos.col < term->tl_cols; )
    {
	dict_T		*dcell;
	int		width;
	VTermScreenCellAttrs attrs;
	VTermColor	fg, bg;
	char_u		rgb[8];
	char_u		mbs[MB_MAXBYTES * VTERM_MAX_CHARS_PER_CELL + 1];
	int		off = 0;
	int		i;

	if (screen == NULL)
	{
	    cellattr_T	*cellattr;
	    int		len;

	    /* vterm has finished, get the cell from scrollback */
	    if (pos.col >= line->sb_cols)
		break;
	    cellattr = line->sb_cells + pos.col;
	    width = cellattr->width;
	    attrs = cellattr->attrs;
	    fg = cellattr->fg;
	    bg = cellattr->bg;
	    /* Take the character bytes from the buffer line. */
	    len = MB_PTR2LEN(p);
	    mch_memmove(mbs, p, len);
	    mbs[len] = NUL;
	    p += len;
	}
	else
	{
	    VTermScreenCell cell;

	    if (vterm_screen_get_cell(screen, pos, &cell) == 0)
		break;
	    /* Convert the cell characters (base + composing) to UTF-8. */
	    for (i = 0; i < VTERM_MAX_CHARS_PER_CELL; ++i)
	    {
		if (cell.chars[i] == 0)
		    break;
		off += (*utf_char2bytes)((int)cell.chars[i], mbs + off);
	    }
	    mbs[off] = NUL;
	    width = cell.width;
	    attrs = cell.attrs;
	    fg = cell.fg;
	    bg = cell.bg;
	}
	dcell = dict_alloc();
	if (dcell == NULL)
	    break;
	list_append_dict(l, dcell);

	dict_add_string(dcell, "chars", mbs);

	vim_snprintf((char *)rgb, 8, "#%02x%02x%02x",
				     fg.red, fg.green, fg.blue);
	dict_add_string(dcell, "fg", rgb);
	vim_snprintf((char *)rgb, 8, "#%02x%02x%02x",
				     bg.red, bg.green, bg.blue);
	dict_add_string(dcell, "bg", rgb);

	dict_add_number(dcell, "attr", cell2attr(attrs, fg, bg));
	dict_add_number(dcell, "width", width);

	/* A double-width character occupies two columns. */
	++pos.col;
	if (width == 2)
	    ++pos.col;
    }
}
/*
 * "term_sendkeys(buf, keys)" function
 * Feeds each (possibly special or multi-byte) key of "keys" to the terminal
 * job.  Does nothing when the terminal already finished.
 */
    void
f_term_sendkeys(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_sendkeys()");
    char_u	*msg;
    term_T	*term;

    rettv->v_type = VAR_UNKNOWN;
    if (buf == NULL)
	return;

    msg = tv_get_string_chk(&argvars[1]);
    if (msg == NULL)
	return;
    term = buf->b_term;
    if (term->tl_vterm == NULL)
	return;

    while (*msg != NUL)
    {
	int	c;

	if (*msg == K_SPECIAL && msg[1] != NUL && msg[2] != NUL)
	{
	    /* Three-byte special key code (K_SPECIAL KS_x KE_x). */
	    c = TO_SPECIAL(msg[1], msg[2]);
	    msg += 3;
	}
	else
	{
	    /* Ordinary, possibly multi-byte, character. */
	    c = PTR2CHAR(msg);
	    msg += MB_CPTR2LEN(msg);
	}
	send_keys_to_term(term, c, FALSE);
    }
}
#if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS) || defined(PROTO)
/*
 * "term_getansicolors(buf)" function
 * Returns a list with the 16 ANSI palette colors as "#rrggbb" strings.
 */
    void
f_term_getansicolors(typval_T *argvars, typval_T *rettv)
{
    buf_T	*buf = term_get_buf(argvars, "term_getansicolors()");
    VTermState	*state;
    VTermColor	color;
    char_u	hexbuf[10];
    int		i;
    list_T	*list;

    if (rettv_list_alloc(rettv) == FAIL)
	return;
    if (buf == NULL || buf->b_term->tl_vterm == NULL)
	return;

    list = rettv->vval.v_list;
    state = vterm_obtain_state(buf->b_term->tl_vterm);
    for (i = 0; i < 16; ++i)
    {
	vterm_state_get_palette_color(state, i, &color);
	sprintf((char *)hexbuf, "#%02x%02x%02x",
					color.red, color.green, color.blue);
	if (list_append_string(list, hexbuf, 7) == FAIL)
	    return;	/* out of memory */
    }
}
/*
 * "term_setansicolors(buf, list)" function
 * Sets the 16 ANSI palette colors from a list of "#rrggbb" strings.
 */
    void
f_term_setansicolors(typval_T *argvars, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_setansicolors()");

    if (buf == NULL || buf->b_term->tl_vterm == NULL)
	return;

    if (argvars[1].v_type != VAR_LIST || argvars[1].vval.v_list == NULL)
    {
	EMSG(_(e_listreq));
	return;
    }
    if (set_ansi_colors_list(buf->b_term->tl_vterm,
					     argvars[1].vval.v_list) == FAIL)
	EMSG(_(e_invarg));
}
/*
 * "term_setrestore(buf, command)" function
 * Stores the command used to restore this terminal in a session file.
 */
    void
f_term_setrestore(typval_T *argvars UNUSED, typval_T *rettv UNUSED)
{
#if defined(FEAT_SESSION)
    buf_T	*buf = term_get_buf(argvars, "term_setrestore()");
    char_u	*cmd;

    if (buf == NULL)
	return;
    cmd = tv_get_string_chk(&argvars[1]);
    vim_free(buf->b_term->tl_command);
    buf->b_term->tl_command = cmd == NULL ? NULL : vim_strsave(cmd);
#endif
}
/*
 * "term_setkill(buf, how)" function
 * Stores how to kill the job when the terminal buffer is closed forcefully.
 */
    void
f_term_setkill(typval_T *argvars UNUSED, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_setkill()");
    char_u	*how;

    if (buf == NULL)
	return;
    how = tv_get_string_chk(&argvars[1]);
    vim_free(buf->b_term->tl_kill);
    buf->b_term->tl_kill = how == NULL ? NULL : vim_strsave(how);
}
/*
 * "term_start(command, options)" function
 * Starts a job in a new terminal buffer and returns its buffer number
 * (zero on failure).
 */
    void
f_term_start(typval_T *argvars, typval_T *rettv)
{
    jobopt_T	opt;
    buf_T	*buf;

    init_job_options(&opt);
    /* Parse the supported job and terminal options; bail out on an invalid
     * option. */
    if (argvars[1].v_type != VAR_UNKNOWN
	    && get_job_options(&argvars[1], &opt,
		JO_TIMEOUT_ALL + JO_STOPONEXIT
		    + JO_CALLBACK + JO_OUT_CALLBACK + JO_ERR_CALLBACK
		    + JO_EXIT_CB + JO_CLOSE_CALLBACK + JO_OUT_IO,
		JO2_TERM_NAME + JO2_TERM_FINISH + JO2_HIDDEN + JO2_TERM_OPENCMD
		    + JO2_TERM_COLS + JO2_TERM_ROWS + JO2_VERTICAL + JO2_CURWIN
		    + JO2_CWD + JO2_ENV + JO2_EOF_CHARS
		    + JO2_NORESTORE + JO2_TERM_KILL
		    + JO2_ANSI_COLORS) == FAIL)
	return;

    buf = term_start(&argvars[0], NULL, &opt, 0);

    if (buf != NULL && buf->b_term != NULL)
	rettv->vval.v_number = buf->b_fnum;
}
/*
 * "term_wait" function
 * Waits for pending terminal output to be processed.  With a second argument
 * waits that many milliseconds, otherwise a short default.
 */
    void
f_term_wait(typval_T *argvars, typval_T *rettv UNUSED)
{
    buf_T	*buf = term_get_buf(argvars, "term_wait()");

    if (buf == NULL)
	return;
    if (buf->b_term->tl_job == NULL)
    {
	ch_log(NULL, "term_wait(): no job to wait for");
	return;
    }
    if (buf->b_term->tl_job->jv_channel == NULL)
	/* channel is closed, nothing to do */
	return;

    /* Get the job status, this will detect a job that finished. */
    if (!buf->b_term->tl_job->jv_channel->ch_keep_open
	    && STRCMP(job_status(buf->b_term->tl_job), "dead") == 0)
    {
	/* The job is dead, keep reading channel I/O until the channel is
	 * closed. buf->b_term may become NULL if the terminal was closed while
	 * waiting. */
	ch_log(NULL, "term_wait(): waiting for channel to close");
	while (buf->b_term != NULL && !buf->b_term->tl_channel_closed)
	{
	    mch_check_messages();
	    parse_queued_messages();
	    ui_delay(10L, FALSE);
	    if (!buf_valid(buf))
		/* If the terminal is closed when the channel is closed the
		 * buffer disappears. */
		break;
	}
	mch_check_messages();
	parse_queued_messages();
    }
    else
    {
	long wait = 10L;

	mch_check_messages();
	parse_queued_messages();

	/* Wait for some time for any channel I/O. */
	if (argvars[1].v_type != VAR_UNKNOWN)
	    wait = tv_get_number(&argvars[1]);
	ui_delay(wait, TRUE);
	mch_check_messages();

	/* Flushing messages on channels is hopefully sufficient.
	 * TODO: is there a better way? */
	parse_queued_messages();
    }
}
/*
 * Called when a channel has sent all the lines to a terminal.
 * Send a CTRL-D to mark the end of the text.
 */
    void
term_send_eof(channel_T *ch)
{
    term_T	*term;

    /* Find the terminal(s) belonging to this channel's job. */
    for (term = first_term; term != NULL; term = term->tl_next)
	if (term->tl_job == ch->ch_job)
	{
	    if (term->tl_eof_chars != NULL)
	    {
		/* User-configured EOF characters, followed by CR. */
		channel_send(ch, PART_IN, term->tl_eof_chars,
					(int)STRLEN(term->tl_eof_chars), NULL);
		channel_send(ch, PART_IN, (char_u *)"\r", 1, NULL);
	    }
# ifdef WIN3264
	    else
		/* Default: CTRL-D */
		channel_send(ch, PART_IN, (char_u *)"\004\r", 2, NULL);
# endif
	}
}
/*
 * Return the job associated with "term", NULL when "term" is NULL or has no
 * job.
 */
    job_T *
term_getjob(term_T *term)
{
    if (term == NULL)
	return NULL;
    return term->tl_job;
}
# if defined(WIN3264) || defined(PROTO)

/**************************************
 * 2. MS-Windows implementation.
 */

# ifndef PROTO

#define WINPTY_SPAWN_FLAG_AUTO_SHUTDOWN 1ul
#define WINPTY_SPAWN_FLAG_EXIT_AFTER_SHUTDOWN 2ull
#define WINPTY_MOUSE_MODE_FORCE		2

/* Pointers to the winpty.dll entry points.  They are resolved at runtime by
 * dyn_winpty_init() so Vim can run without the DLL being present. */
void* (*winpty_config_new)(UINT64, void*);
void* (*winpty_open)(void*, void*);
void* (*winpty_spawn_config_new)(UINT64, void*, LPCWSTR, void*, void*, void*);
BOOL (*winpty_spawn)(void*, void*, HANDLE*, HANDLE*, DWORD*, void*);
void (*winpty_config_set_mouse_mode)(void*, int);
void (*winpty_config_set_initial_size)(void*, int, int);
LPCWSTR (*winpty_conin_name)(void*);
LPCWSTR (*winpty_conout_name)(void*);
LPCWSTR (*winpty_conerr_name)(void*);
void (*winpty_free)(void*);
void (*winpty_config_free)(void*);
void (*winpty_spawn_config_free)(void*);
void (*winpty_error_free)(void*);
LPCWSTR (*winpty_error_msg)(void*);
BOOL (*winpty_set_size)(void*, int, int, void*);
HANDLE (*winpty_agent_process)(void*);

#define WINPTY_DLL "winpty.dll"

static HINSTANCE hWinPtyDLL = NULL;
# endif
/*
 * Dynamically load winpty.dll and resolve all entry points listed in
 * "winpty_entry" into the file-scope function pointers above.
 * Returns OK when the library is (already) loaded and all symbols resolved,
 * FAIL otherwise.  When "verbose" is TRUE an error message is given for a
 * missing DLL or a missing symbol.
 */
    static int
dyn_winpty_init(int verbose)
{
    int i;
    static struct
    {
	char	*name;		/* exported symbol name */
	FARPROC	*ptr;		/* file-scope function pointer to fill in */
    } winpty_entry[] =
    {
	{"winpty_conerr_name", (FARPROC*)&winpty_conerr_name},
	{"winpty_config_free", (FARPROC*)&winpty_config_free},
	{"winpty_config_new", (FARPROC*)&winpty_config_new},
	{"winpty_config_set_mouse_mode",
				      (FARPROC*)&winpty_config_set_mouse_mode},
	{"winpty_config_set_initial_size",
				    (FARPROC*)&winpty_config_set_initial_size},
	{"winpty_conin_name", (FARPROC*)&winpty_conin_name},
	{"winpty_conout_name", (FARPROC*)&winpty_conout_name},
	{"winpty_error_free", (FARPROC*)&winpty_error_free},
	{"winpty_free", (FARPROC*)&winpty_free},
	{"winpty_open", (FARPROC*)&winpty_open},
	{"winpty_spawn", (FARPROC*)&winpty_spawn},
	{"winpty_spawn_config_free", (FARPROC*)&winpty_spawn_config_free},
	{"winpty_spawn_config_new", (FARPROC*)&winpty_spawn_config_new},
	{"winpty_error_msg", (FARPROC*)&winpty_error_msg},
	{"winpty_set_size", (FARPROC*)&winpty_set_size},
	{"winpty_agent_process", (FARPROC*)&winpty_agent_process},
	{NULL, NULL}
    };

    /* No need to initialize twice. */
    if (hWinPtyDLL)
	return OK;
    /* Load winpty.dll, prefer using the 'winptydll' option, fall back to just
     * winpty.dll. */
    if (*p_winptydll != NUL)
	hWinPtyDLL = vimLoadLib((char *)p_winptydll);
    if (!hWinPtyDLL)
	hWinPtyDLL = vimLoadLib(WINPTY_DLL);
    if (!hWinPtyDLL)
    {
	if (verbose)
	    EMSG2(_(e_loadlib), *p_winptydll != NUL ? p_winptydll
						       : (char_u *)WINPTY_DLL);
	return FAIL;
    }
    /* Resolve every entry; a single missing symbol makes the feature fail.
     * NOTE(review): hWinPtyDLL stays set even when GetProcAddress() fails
     * below, so a later call returns OK with a NULL pointer left in the
     * table -- confirm whether that is intentional. */
    for (i = 0; winpty_entry[i].name != NULL
					 && winpty_entry[i].ptr != NULL; ++i)
    {
	if ((*winpty_entry[i].ptr = (FARPROC)GetProcAddress(hWinPtyDLL,
					      winpty_entry[i].name)) == NULL)
	{
	    if (verbose)
		EMSG2(_(e_loadfunc), winpty_entry[i].name);
	    return FAIL;
	}
    }

    return OK;
}
/*
 * Create a new terminal of "rows" by "cols" cells.
 * Store a reference in "term".
 * Return OK or FAIL.
 *
 * BUGFIX: the command/cwd/env buffers and the spawn config are released once
 * the job has been spawned; the pointers are now reset to NULL there, because
 * a later "goto failed" (e.g. when create_vterm() fails) runs the cleanup
 * code, which frees anything non-NULL and previously freed the same memory
 * twice.  "env_wchar" is only ever an alias for ga_env.ga_data, so it is
 * released through ga_clear(&ga_env) exclusively.
 */
    static int
term_and_job_init(
	term_T	    *term,
	typval_T    *argvar,
	char	    **argv UNUSED,
	jobopt_T    *opt,
	jobopt_T    *orig_opt)
{
    WCHAR	    *cmd_wchar = NULL;
    WCHAR	    *cwd_wchar = NULL;
    WCHAR	    *env_wchar = NULL;	/* alias for ga_env.ga_data */
    channel_T	    *channel = NULL;
    job_T	    *job = NULL;
    DWORD	    error;
    HANDLE	    jo = NULL;
    HANDLE	    child_process_handle;
    HANDLE	    child_thread_handle;
    void	    *winpty_err = NULL;
    void	    *spawn_config = NULL;
    garray_T	    ga_cmd, ga_env;
    char_u	    *cmd = NULL;

    if (dyn_winpty_init(TRUE) == FAIL)
	return FAIL;
    ga_init2(&ga_cmd, (int)sizeof(char*), 20);
    ga_init2(&ga_env, (int)sizeof(char*), 20);

    /* Get the command: either the string directly or built from the list. */
    if (argvar->v_type == VAR_STRING)
    {
	cmd = argvar->vval.v_string;
    }
    else if (argvar->v_type == VAR_LIST)
    {
	if (win32_build_cmd(argvar->vval.v_list, &ga_cmd) == FAIL)
	    goto failed;
	cmd = ga_cmd.ga_data;
    }
    if (cmd == NULL || *cmd == NUL)
    {
	EMSG(_(e_invarg));
	goto failed;
    }

    cmd_wchar = enc_to_utf16(cmd, NULL);
    ga_clear(&ga_cmd);
    if (cmd_wchar == NULL)
	goto failed;
    if (opt->jo_cwd != NULL)
	cwd_wchar = enc_to_utf16(opt->jo_cwd, NULL);

    win32_build_env(opt->jo_env, &ga_env, TRUE);
    env_wchar = ga_env.ga_data;

    term->tl_winpty_config = winpty_config_new(0, &winpty_err);
    if (term->tl_winpty_config == NULL)
	goto failed;

    winpty_config_set_mouse_mode(term->tl_winpty_config,
						     WINPTY_MOUSE_MODE_FORCE);
    winpty_config_set_initial_size(term->tl_winpty_config,
						term->tl_cols, term->tl_rows);
    term->tl_winpty = winpty_open(term->tl_winpty_config, &winpty_err);
    if (term->tl_winpty == NULL)
	goto failed;

    spawn_config = winpty_spawn_config_new(
	    WINPTY_SPAWN_FLAG_AUTO_SHUTDOWN |
		WINPTY_SPAWN_FLAG_EXIT_AFTER_SHUTDOWN,
	    NULL,
	    cmd_wchar,
	    cwd_wchar,
	    env_wchar,
	    &winpty_err);
    if (spawn_config == NULL)
	goto failed;

    channel = add_channel();
    if (channel == NULL)
	goto failed;

    job = job_alloc();
    if (job == NULL)
	goto failed;
    if (argvar->v_type == VAR_STRING)
    {
	int argc;

	build_argv_from_string(cmd, &job->jv_argv, &argc);
    }
    else
    {
	int argc;

	build_argv_from_list(argvar->vval.v_list, &job->jv_argv, &argc);
    }

    if (opt->jo_set & JO_IN_BUF)
	job->jv_in_buf = buflist_findnr(opt->jo_io_buf[PART_IN]);

    if (!winpty_spawn(term->tl_winpty, spawn_config, &child_process_handle,
	    &child_thread_handle, &error, &winpty_err))
	goto failed;

    channel_set_pipes(channel,
	(sock_T)CreateFileW(
	    winpty_conin_name(term->tl_winpty),
	    GENERIC_WRITE, 0, NULL,
	    OPEN_EXISTING, 0, NULL),
	(sock_T)CreateFileW(
	    winpty_conout_name(term->tl_winpty),
	    GENERIC_READ, 0, NULL,
	    OPEN_EXISTING, 0, NULL),
	(sock_T)CreateFileW(
	    winpty_conerr_name(term->tl_winpty),
	    GENERIC_READ, 0, NULL,
	    OPEN_EXISTING, 0, NULL));

    /* Write lines with CR instead of NL. */
    channel->ch_write_text_mode = TRUE;

    /* Try using a job object, so the whole process tree can be terminated;
     * fall back to TerminateProcess when it cannot be assigned. */
    jo = CreateJobObject(NULL, NULL);
    if (jo == NULL)
	goto failed;

    if (!AssignProcessToJobObject(jo, child_process_handle))
    {
	/* Failed, switch the way to terminate process with TerminateProcess. */
	CloseHandle(jo);
	jo = NULL;
    }

    /* These are no longer needed; reset the pointers so the cleanup under
     * "failed:" cannot free them a second time. */
    winpty_spawn_config_free(spawn_config);
    spawn_config = NULL;
    vim_free(cmd_wchar);
    cmd_wchar = NULL;
    vim_free(cwd_wchar);
    cwd_wchar = NULL;
    ga_clear(&ga_env);		/* frees env_wchar (the ga_data alias) */
    env_wchar = NULL;

    if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL)
	goto failed;

#if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS)
    if (opt->jo_set2 & JO2_ANSI_COLORS)
	set_vterm_palette(term->tl_vterm, opt->jo_ansi_colors);
    else
	init_vterm_ansi_colors(term->tl_vterm);
#endif

    channel_set_job(channel, job, opt);
    job_set_options(job, opt);

    job->jv_channel = channel;
    job->jv_proc_info.hProcess = child_process_handle;
    job->jv_proc_info.dwProcessId = GetProcessId(child_process_handle);
    job->jv_job_object = jo;
    job->jv_status = JOB_STARTED;
    job->jv_tty_in = utf16_to_enc(
	    (short_u*)winpty_conin_name(term->tl_winpty), NULL);
    job->jv_tty_out = utf16_to_enc(
	    (short_u*)winpty_conout_name(term->tl_winpty), NULL);
    ++job->jv_refcount;
    term->tl_job = job;

    /* Redirecting stdout and stderr doesn't work at the job level.  Instead
     * open the file here and handle it in.  opt->jo_io was changed in
     * setup_job_options(), use the original flags here. */
    if (orig_opt->jo_io[PART_OUT] == JIO_FILE)
    {
	char_u *fname = opt->jo_io_name[PART_OUT];

	ch_log(channel, "Opening output file %s", fname);
	term->tl_out_fd = mch_fopen((char *)fname, WRITEBIN);
	if (term->tl_out_fd == NULL)
	    EMSG2(_(e_notopen), fname);
    }

    return OK;

failed:
    ga_clear(&ga_cmd);
    ga_clear(&ga_env);		/* also releases env_wchar when still set */
    vim_free(cmd_wchar);
    vim_free(cwd_wchar);
    if (spawn_config != NULL)
	winpty_spawn_config_free(spawn_config);
    if (channel != NULL)
	channel_clear(channel);
    if (job != NULL)
    {
	job->jv_channel = NULL;
	job_cleanup(job);
    }
    term->tl_job = NULL;
    if (jo != NULL)
	CloseHandle(jo);
    if (term->tl_winpty != NULL)
	winpty_free(term->tl_winpty);
    term->tl_winpty = NULL;
    if (term->tl_winpty_config != NULL)
	winpty_config_free(term->tl_winpty_config);
    term->tl_winpty_config = NULL;
    if (winpty_err != NULL)
    {
	char_u *msg = utf16_to_enc(
			    (short_u *)winpty_error_msg(winpty_err), NULL);

	EMSG(msg);
	winpty_error_free(winpty_err);
    }
    return FAIL;
}
/*
 * Create a terminal that is not connected to a real job: a pair of named
 * pipes that another process can attach to.  The dummy job behaves as if it
 * already finished.
 * Return OK or FAIL.
 */
    static int
create_pty_only(term_T *term, jobopt_T *options)
{
    HANDLE	    hPipeIn = INVALID_HANDLE_VALUE;
    HANDLE	    hPipeOut = INVALID_HANDLE_VALUE;
    char	    in_name[80], out_name[80];
    channel_T	    *channel = NULL;

    if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL)
	return FAIL;

    /* Pipe names are made unique with the process id and buffer number. */
    vim_snprintf(in_name, sizeof(in_name), "\\\\.\\pipe\\vim-%d-in-%d",
	    GetCurrentProcessId(),
	    curbuf->b_fnum);
    hPipeIn = CreateNamedPipe(in_name, PIPE_ACCESS_OUTBOUND,
	    PIPE_TYPE_MESSAGE | PIPE_NOWAIT,
	    PIPE_UNLIMITED_INSTANCES,
	    0, 0, NMPWAIT_NOWAIT, NULL);
    if (hPipeIn == INVALID_HANDLE_VALUE)
	goto failed;

    vim_snprintf(out_name, sizeof(out_name), "\\\\.\\pipe\\vim-%d-out-%d",
	    GetCurrentProcessId(),
	    curbuf->b_fnum);
    hPipeOut = CreateNamedPipe(out_name, PIPE_ACCESS_INBOUND,
	    PIPE_TYPE_MESSAGE | PIPE_NOWAIT,
	    PIPE_UNLIMITED_INSTANCES,
	    0, 0, 0, NULL);
    if (hPipeOut == INVALID_HANDLE_VALUE)
	goto failed;

    ConnectNamedPipe(hPipeIn, NULL);
    ConnectNamedPipe(hPipeOut, NULL);

    term->tl_job = job_alloc();
    if (term->tl_job == NULL)
	goto failed;
    ++term->tl_job->jv_refcount;

    /* behave like the job is already finished */
    term->tl_job->jv_status = JOB_FINISHED;

    channel = add_channel();
    if (channel == NULL)
	goto failed;
    term->tl_job->jv_channel = channel;
    channel->ch_keep_open = TRUE;
    channel->ch_named_pipe = TRUE;

    channel_set_pipes(channel,
	    (sock_T)hPipeIn,
	    (sock_T)hPipeOut,
	    (sock_T)hPipeOut);
    channel_set_job(channel, term->tl_job, options);
    term->tl_job->jv_tty_in = vim_strsave((char_u*)in_name);
    term->tl_job->jv_tty_out = vim_strsave((char_u*)out_name);
    return OK;

failed:
    /* BUGFIX: the handles are initialized to INVALID_HANDLE_VALUE, not
     * NULL, so the previous "!= NULL" tests passed INVALID_HANDLE_VALUE to
     * CloseHandle() when a pipe was never created. */
    if (hPipeIn != INVALID_HANDLE_VALUE)
	CloseHandle(hPipeIn);
    if (hPipeOut != INVALID_HANDLE_VALUE)
	CloseHandle(hPipeOut);
    return FAIL;
}
/*
 * Free the terminal emulator part of "term".
 */
    static void
term_free_vterm(term_T *term)
{
    /* Release the winpty handles first, then the vterm instance; each
     * pointer is reset so a second call is harmless. */
    if (term->tl_winpty != NULL)
    {
	winpty_free(term->tl_winpty);
	term->tl_winpty = NULL;
    }
    if (term->tl_winpty_config != NULL)
    {
	winpty_config_free(term->tl_winpty_config);
	term->tl_winpty_config = NULL;
    }
    if (term->tl_vterm != NULL)
    {
	vterm_free(term->tl_vterm);
	term->tl_vterm = NULL;
    }
}
/*
 * Report the size to the terminal.
 */
    static void
term_report_winsize(term_T *term, int rows, int cols)
{
    /* Note the cols/rows argument order expected by winpty. */
    if (term->tl_winpty == NULL)
	return;
    winpty_set_size(term->tl_winpty, cols, rows, NULL);
}
int
terminal_enabled(void)
{
return dyn_winpty_init(FALSE) == OK;
}
# else
/**************************************
* 3. Unix-like implementation.
*/
/*
 * Create a new terminal of "rows" by "cols" cells.
 * Start job for "cmd".
 * Store the pointers in "term".
 * When "argv" is not NULL then "argvar" is not used.
 * Return OK or FAIL.
 */
    static int
term_and_job_init(
	term_T	    *term,
	typval_T    *argvar,
	char	    **argv,
	jobopt_T    *opt,
	jobopt_T    *orig_opt UNUSED)
{
    job_T	*job;

    if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL)
	return FAIL;

#if defined(FEAT_GUI) || defined(FEAT_TERMGUICOLORS)
    if (opt->jo_set2 & JO2_ANSI_COLORS)
	set_vterm_palette(term->tl_vterm, opt->jo_ansi_colors);
    else
	init_vterm_ansi_colors(term->tl_vterm);
#endif

    /* This may change a string in "argvar". */
    job = job_start(argvar, argv, opt, TRUE);
    term->tl_job = job;
    if (job == NULL)
	return FAIL;
    ++job->jv_refcount;

    /* Only report success when the job actually got a channel and did not
     * fail right away. */
    if (job->jv_channel == NULL || job->jv_status == JOB_FAILED)
	return FAIL;
    return OK;
}
/*
 * Create a terminal with only a pty, no job attached to it.
 * Return OK or FAIL.
 */
    static int
create_pty_only(term_T *term, jobopt_T *opt)
{
    job_T	*job;

    if (create_vterm(term, term->tl_rows, term->tl_cols) == FAIL)
	return FAIL;

    job = job_alloc();
    term->tl_job = job;
    if (job == NULL)
	return FAIL;
    ++job->jv_refcount;

    /* behave like the job is already finished */
    job->jv_status = JOB_FINISHED;

    return mch_create_pty_channel(job, opt);
}
/*
 * Free the terminal emulator part of "term".
 */
    static void
term_free_vterm(term_T *term)
{
    if (term->tl_vterm == NULL)
	return;
    vterm_free(term->tl_vterm);
    term->tl_vterm = NULL;
}
/*
 * Report the size to the terminal.
 */
    static void
term_report_winsize(term_T *term, int rows, int cols)
{
    int	    part;
    int	    fd = -1;

    /* Use an ioctl() to report the new window size to the job. */
    if (term->tl_job == NULL || term->tl_job->jv_channel == NULL)
	return;

    /* Find the first channel fd that is an actual tty. */
    for (part = PART_OUT; part < PART_COUNT; ++part)
    {
	fd = term->tl_job->jv_channel->ch_part[part].ch_fd;
	if (isatty(fd))
	    break;
    }
    if (part < PART_COUNT && mch_report_winsize(fd, rows, cols) == OK)
	mch_signal_job(term->tl_job, (char_u *)"winch");
}
# endif
#endif /* FEAT_TERMINAL */
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_528_3 |
crossvul-cpp_data_bad_3185_0 | /* radare - LGPL - Copyright 2011-2016 - pancake */
#include <r_cons.h>
#include <r_types.h>
#include <r_util.h>
#include <r_lib.h>
#include <r_bin.h>
#include "dex/dex.h"
#define r_hash_adler32 __adler32
#include "../../hash/adler32.c"
extern struct r_bin_dbginfo_t r_bin_dbginfo_dex;
#define DEBUG_PRINTF 0
#if DEBUG_PRINTF
#define dprintf eprintf
#else
#define dprintf if (0)eprintf
#endif
static bool dexdump = false;
static Sdb *mdb = NULL;
static Sdb *cdb = NULL; // TODO: remove if it is not used
/*
 * Return a heap-allocated copy of string number "idx" from the DEX string
 * table, or NULL when the index, the uleb128 length prefix or the string
 * data would fall outside the mapped file.  The caller frees the result.
 */
static char *getstr(RBinDexObj *bin, int idx) {
	ut8 buf[6];
	ut64 len;
	int uleblen;
	/* Reject a missing table and out-of-range indices up front. */
	if (!bin || idx < 0 || idx >= bin->header.strings_size ||
		!bin->strings) {
		return NULL;
	}
	if (bin->strings[idx] >= bin->size) {
		return NULL;
	}
	/* Read the uleb128 length prefix (at most 5 bytes, +1 slack). */
	if (r_buf_read_at (bin->b, bin->strings[idx], buf, sizeof (buf)) < 1) {
		return NULL;
	}
	uleblen = r_uleb128 (buf, sizeof (buf), &len) - buf;
	if (!uleblen || uleblen >= bin->size) {
		return NULL;
	}
	if (!len || len >= bin->size) {
		return NULL;
	}
	// TODO: improve this ugly fix
	/* NOTE(review): the uleb128 value is presumably the utf16_size of the
	 * string (per the DEX format), which can differ from the MUTF-8 byte
	 * length; this scan walks forward to the terminating NUL to get the
	 * real byte count, bailing out when it would leave the file --
	 * confirm against the DEX spec. */
	char c = 'a';
	while (c) {
		ut64 offset = bin->strings[idx] + uleblen + len;
		/* "offset < len" also catches wrap-around of the addition. */
		if (offset >= bin->size || offset < len) {
			return NULL;
		}
		r_buf_read_at (bin->b, offset, (ut8*)&c, 1);
		len++;
	}
	if ((int)len > 0 && len < R_BIN_SIZEOF_STRINGS) {
		/* calloc guarantees the trailing NUL even on a short read. */
		char *str = calloc (1, len + 1);
		if (str) {
			r_buf_read_at (bin->b, (bin->strings[idx]) + uleblen,
				(ut8 *)str, len);
			str[len] = 0;
			return str;
		}
	}
	return NULL;
}
/*
 * Return the number of bits set in "val" (population count), using
 * Kernighan's clear-lowest-set-bit loop.
 */
static int countOnes(ut32 val) {
	int n = 0;
	while (val) {
		val &= val - 1;	/* clears the lowest set bit */
		n++;
	}
	return n;
}
/*
 * Selects which row of kAccessStrings (see createAccessFlagStr) is used
 * when rendering an access_flags bitfield: the same bit can mean different
 * things for classes, methods and fields.
 */
typedef enum {
	kAccessForClass = 0,
	kAccessForMethod = 1,
	kAccessForField = 2,
	kAccessForMAX		/* number of rows, not a real selector */
} AccessFor;
/*
 * Render an access_flags bitfield as a space-separated list of flag names;
 * "forWhat" selects the class/method/field interpretation of each bit.
 * Returns a heap string owned by the caller, or NULL when allocation fails
 * (NOTE(review): confirm all call sites handle a NULL result).
 */
static char *createAccessFlagStr(ut32 flags, AccessFor forWhat) {
#define NUM_FLAGS 18
	static const char* kAccessStrings[kAccessForMAX][NUM_FLAGS] = {
		{
			/* class, inner class */
			"PUBLIC",		/* 0x0001 */
			"PRIVATE",		/* 0x0002 */
			"PROTECTED",		/* 0x0004 */
			"STATIC",		/* 0x0008 */
			"FINAL",		/* 0x0010 */
			"?",			/* 0x0020 */
			"?",			/* 0x0040 */
			"?",			/* 0x0080 */
			"?",			/* 0x0100 */
			"INTERFACE",		/* 0x0200 */
			"ABSTRACT",		/* 0x0400 */
			"?",			/* 0x0800 */
			"SYNTHETIC",		/* 0x1000 */
			"ANNOTATION",		/* 0x2000 */
			"ENUM",			/* 0x4000 */
			"?",			/* 0x8000 */
			"VERIFIED",		/* 0x10000 */
			"OPTIMIZED",		/* 0x20000 */
		},
		{
			/* method */
			"PUBLIC",		/* 0x0001 */
			"PRIVATE",		/* 0x0002 */
			"PROTECTED",		/* 0x0004 */
			"STATIC",		/* 0x0008 */
			"FINAL",		/* 0x0010 */
			"SYNCHRONIZED",		/* 0x0020 */
			"BRIDGE",		/* 0x0040 */
			"VARARGS",		/* 0x0080 */
			"NATIVE",		/* 0x0100 */
			"?",			/* 0x0200 */
			"ABSTRACT",		/* 0x0400 */
			"STRICT",		/* 0x0800 */
			"SYNTHETIC",		/* 0x1000 */
			"?",			/* 0x2000 */
			"?",			/* 0x4000 */
			"MIRANDA",		/* 0x8000 */
			"CONSTRUCTOR",		/* 0x10000 */
			"DECLARED_SYNCHRONIZED",/* 0x20000 */
		},
		{
			/* field */
			"PUBLIC",		/* 0x0001 */
			"PRIVATE",		/* 0x0002 */
			"PROTECTED",		/* 0x0004 */
			"STATIC",		/* 0x0008 */
			"FINAL",		/* 0x0010 */
			"?",			/* 0x0020 */
			"VOLATILE",		/* 0x0040 */
			"TRANSIENT",		/* 0x0080 */
			"?",			/* 0x0100 */
			"?",			/* 0x0200 */
			"?",			/* 0x0400 */
			"?",			/* 0x0800 */
			"SYNTHETIC",		/* 0x1000 */
			"?",			/* 0x2000 */
			"ENUM",			/* 0x4000 */
			"?",			/* 0x8000 */
			"?",			/* 0x10000 */
			"?",			/* 0x20000 */
		},
	};
	const int kLongest = 21;	/* strlen ("DECLARED_SYNCHRONIZED") */
	int i, count;
	char* str;
	char* cp;

	/* One kLongest+1 byte slot per set bit.  count <= 32 (bits in flags)
	 * and every emitted name fits in kLongest characters, so the size
	 * computation cannot overflow and the buffer cannot be overrun even
	 * when bits above the NUM_FLAGS range are set (they only over-
	 * allocate, the loop below never emits text for them). */
	count = countOnes (flags);
	cp = str = (char*) malloc (count * (kLongest + 1) + 1);
	if (!str) {
		/* BUGFIX: malloc() was unchecked; a failure crashed below. */
		return NULL;
	}
	for (i = 0; i < NUM_FLAGS; i++) {
		if (flags & 0x01) {
			const char* accessStr = kAccessStrings[forWhat][i];
			int len = strlen (accessStr);
			if (cp != str) {
				*cp++ = ' ';
			}
			memcpy (cp, accessStr, len);
			cp += len;
		}
		flags >>= 1;
	}
	*cp = '\0';
	return str;
}
/*
 * Return the (heap-allocated) descriptor string of type "type_idx", or
 * NULL for an out-of-range index.
 */
static char *dex_type_descriptor(RBinDexObj *bin, int type_idx) {
	if (type_idx >= 0 && type_idx < bin->header.types_size) {
		return getstr (bin, bin->types[type_idx].descriptor_id);
	}
	return NULL;
}
/*
 * Build the "(parameters)return-type" signature string for method
 * "method_idx".  Returns a heap string owned by the caller, or NULL when
 * any index or offset is out of range.
 *
 * Fixes over the previous version:
 * - realloc() was unchecked (strcpy on NULL after OOM),
 * - "return_type" (from getstr) and every loop iteration's "buff" leaked,
 * - a NULL "signature" could reach a %s conversion when the list had no
 *   usable entries,
 * - "type_idx < 0" was dead code (type_idx is an unsigned ut16).
 */
static char *dex_method_signature(RBinDexObj *bin, int method_idx) {
	ut32 proto_id, params_off, type_id, list_size;
	char *r, *return_type = NULL, *signature = NULL, *buff = NULL;
	ut8 *bufptr;
	ut16 type_idx;
	int pos = 0, i, size = 1;

	if (method_idx < 0 || method_idx >= bin->header.method_size) {
		return NULL;
	}
	proto_id = bin->methods[method_idx].proto_id;
	if (proto_id >= bin->header.prototypes_size) {
		return NULL;
	}
	params_off = bin->protos[proto_id].parameters_off;
	if (params_off >= bin->size) {
		return NULL;
	}
	type_id = bin->protos[proto_id].return_type_id;
	if (type_id >= bin->header.types_size) {
		return NULL;
	}
	return_type = getstr (bin, bin->types[type_id].descriptor_id);
	if (!return_type) {
		return NULL;
	}
	if (!params_off) {
		/* No parameter list at all: "()" + return type. */
		r = r_str_newf ("()%s", return_type);
		free (return_type);
		return r;
	}
	bufptr = bin->b->buf;
	/* size of the list, in entries (attacker-controlled; every access
	 * below is bounds-checked against bin->size). */
	list_size = r_read_le32 (bufptr + params_off);
	for (i = 0; i < list_size; i++) {
		int buff_len = 0;
		char *tmp;
		if (params_off + 4 + (i * 2) >= bin->size) {
			break;
		}
		type_idx = r_read_le16 (bufptr + params_off + 4 + (i * 2));
		if (type_idx >= bin->header.types_size ||
			type_idx >= bin->size) {
			break;
		}
		free (buff);	/* release the previous iteration's string */
		buff = getstr (bin, bin->types[type_idx].descriptor_id);
		if (!buff) {
			break;
		}
		buff_len = strlen (buff);
		size += buff_len + 1;
		tmp = realloc (signature, size);
		if (!tmp) {
			break;	/* keep what we have; "signature" is intact */
		}
		signature = tmp;
		strcpy (signature + pos, buff);
		pos += buff_len;
		signature[pos] = '\0';
	}
	r = r_str_newf ("(%s)%s", signature ? signature : "", return_type);
	free (buff);
	free (signature);
	free (return_type);
	return r;
}
/*
 * Return the parameter type descriptors of method "method_idx" as an RList
 * of heap strings (owned by the list), an empty list when the method takes
 * no parameters, or NULL on a bad index/offset.
 */
static RList *dex_method_signature2(RBinDexObj *bin, int method_idx) {
	ut32 proto_id, params_off, list_size;
	char *buff = NULL;
	ut8 *bufptr;
	ut16 type_idx;
	int i;
	RList *params = r_list_newf (free);
	if (!params) {
		return NULL;
	}
	if (method_idx < 0 || method_idx >= bin->header.method_size) {
		goto out_error;
	}
	proto_id = bin->methods[method_idx].proto_id;
	if (proto_id >= bin->header.prototypes_size) {
		goto out_error;
	}
	params_off = bin->protos[proto_id].parameters_off;
	if (params_off >= bin->size) {
		goto out_error;
	}
	/* parameters_off == 0 means "no parameter list": empty result. */
	if (!params_off) {
		return params;
	}
	bufptr = bin->b->buf;
	// size of the list, in entries
	list_size = r_read_le32 (bufptr + params_off);
	/* list_size is attacker-controlled; the per-entry offset check below
	 * (including the "of < params_off" wrap-around test) bounds the
	 * loop to the mapped file. */
	for (i = 0; i < list_size; i++) {
		ut64 of = params_off + 4 + (i * 2);
		if (of >= bin->size || of < params_off) {
			break;
		}
		type_idx = r_read_le16 (bufptr + of);
		/* NOTE(review): "type_idx > bin->size" compares a type index
		 * against the file size, which looks like a leftover -- the
		 * types_size check before it is the meaningful one; confirm
		 * before removing. */
		if (type_idx >= bin->header.types_size ||
			type_idx > bin->size) {
			break;
		}
		buff = getstr (bin, bin->types[type_idx].descriptor_id);
		if (!buff) {
			break;
		}
		r_list_append (params, buff);
	}
	return params;
out_error:
	r_list_free (params);
	return NULL;
}
// TODO: fix this, now has more registers that it should
// https://github.com/android/platform_dalvik/blob/0641c2b4836fae3ee8daf6c0af45c316c84d5aeb/libdex/DexDebugInfo.cpp#L312
// https://github.com/android/platform_dalvik/blob/0641c2b4836fae3ee8daf6c0af45c316c84d5aeb/libdex/DexDebugInfo.cpp#L141
/*
 * Run the debug_info_item state machine for one method and store the
 * line-number info in binfile->sdb_addrinfo; with "dexdump" enabled the
 * positions and locals are also printed.
 *
 * Security fixes in this version (all fields below come straight from the
 * file and must be distrusted):
 * - NULL/range check on the r_buf_get_at() result for debug_info_off,
 * - every opcode fetch is bounded by p4_end (no read past the buffer),
 * - DBG_END_LOCAL (0x5) and DBG_RESTART_LOCAL (0x6) now bounds-check
 *   register_num like DBG_START_LOCAL already did; the unchecked value was
 *   an out-of-bounds read/write on debug_locals[],
 * - the implicit "this" slot is only written when argReg < regsz,
 * - no zero-length VLA when regsz == 0,
 * - emitted_debug_locals and the per-line "fileline" string no longer leak.
 */
static void dex_parse_debug_item(RBinFile *binfile, RBinDexObj *bin,
	RBinDexClass *c, int MI, int MA, int paddr, int ins_size,
	int insns_size, char *class_name, int regsz,
	int debug_info_off) {
	struct r_bin_t *rbin = binfile->rbin;
	const ut8 *p4 = r_buf_get_at (binfile->buf, debug_info_off, NULL);
	const ut8 *p4_end;
	ut64 line_start;
	ut64 parameters_size;
	ut64 param_type_idx;
	ut16 argReg = regsz - ins_size;
	ut64 source_file_idx = c->source_file;
	RList *params, *debug_positions, *emitted_debug_locals = NULL;
	bool keep = true;

	/* BUGFIX: r_buf_get_at() returns NULL for an out-of-range offset and
	 * was dereferenced unconditionally. */
	if (!p4 || debug_info_off >= binfile->buf->length) {
		return;
	}
	p4_end = p4 + binfile->buf->length - debug_info_off;
	if (argReg > regsz) {
		return; // this return breaks tests
	}
	p4 = r_uleb128 (p4, p4_end - p4, &line_start);
	p4 = r_uleb128 (p4, p4_end - p4, &parameters_size);
	// TODO: check when we should use source_file
	// The state machine consists of five registers
	ut32 address = 0;
	ut32 line = line_start;

	if (!(debug_positions = r_list_newf ((RListFree)free))) {
		return;
	}
	if (!(emitted_debug_locals = r_list_newf ((RListFree)free))) {
		r_list_free (debug_positions);
		return;
	}
	/* At least one slot, so regsz == 0 does not create a zero-length VLA
	 * (undefined behavior); only regsz entries are ever used. */
	struct dex_debug_local_t debug_locals[regsz > 0 ? regsz : 1];
	memset (debug_locals, 0,
		sizeof (struct dex_debug_local_t) * (regsz > 0 ? regsz : 1));
	/* Non-static methods carry an implicit "this" parameter.
	 * BUGFIX: write the slot only when it is in range (argReg == regsz
	 * used to write one past the end of debug_locals). */
	if (!(MA & 0x0008) && argReg < regsz) {
		debug_locals[argReg].name = "this";
		debug_locals[argReg].descriptor = r_str_newf("%s;", class_name);
		debug_locals[argReg].startAddress = 0;
		debug_locals[argReg].signature = NULL;
		debug_locals[argReg].live = true;
		argReg++;
	}
	if (!(params = dex_method_signature2 (bin, MI))) {
		r_list_free (debug_positions);
		r_list_free (emitted_debug_locals);
		return;
	}

	RListIter *iter = r_list_iterator (params);
	char *name;
	char *type;
	int reg;

	/* Seed the locals table with the declared parameters. */
	r_list_foreach (params, iter, type) {
		if ((argReg >= regsz) || !type || parameters_size <= 0) {
			r_list_free (debug_positions);
			r_list_free (params);
			r_list_free (emitted_debug_locals);
			return;
		}
		p4 = r_uleb128 (p4, p4_end - p4, &param_type_idx); // read uleb128p1
		param_type_idx -= 1;
		name = getstr (bin, param_type_idx);
		reg = argReg;
		switch (type[0]) {
		case 'D':
		case 'J':
			argReg += 2;	/* longs and doubles use two registers */
			break;
		default:
			argReg += 1;
			break;
		}
		if (name) {
			debug_locals[reg].name = name;
			debug_locals[reg].descriptor = type;
			debug_locals[reg].signature = NULL;
			debug_locals[reg].startAddress = address;
			debug_locals[reg].live = true;
		}
		--parameters_size;
	}

	/* BUGFIX: a truncated item used to read past the buffer here. */
	if (p4 >= p4_end) {
		r_list_free (debug_positions);
		r_list_free (params);
		r_list_free (emitted_debug_locals);
		return;
	}
	ut8 opcode = *(p4++) & 0xff;
	while (keep) {
		switch (opcode) {
		case 0x0: // DBG_END_SEQUENCE
			keep = false;
			break;
		case 0x1: // DBG_ADVANCE_PC
			{
			ut64 addr_diff;
			p4 = r_uleb128 (p4, p4_end - p4, &addr_diff);
			address += addr_diff;
			}
			break;
		case 0x2: // DBG_ADVANCE_LINE
			{
			st64 line_diff = r_sleb128 (&p4, p4_end);
			line += line_diff;
			}
			break;
		case 0x3: // DBG_START_LOCAL
			{
			ut64 register_num;
			ut64 name_idx;
			ut64 type_idx;
			p4 = r_uleb128 (p4, p4_end - p4, &register_num);
			p4 = r_uleb128 (p4, p4_end - p4, &name_idx);
			name_idx -= 1;
			p4 = r_uleb128 (p4, p4_end - p4, &type_idx);
			type_idx -= 1;
			if (register_num >= regsz) {
				r_list_free (debug_positions);
				r_list_free (params);
				r_list_free (emitted_debug_locals);
				return;
			}
			// Emit what was previously there, if anything
			// emitLocalCbIfLive
			if (debug_locals[register_num].live) {
				struct dex_debug_local_t *local = malloc (
					sizeof (struct dex_debug_local_t));
				if (!local) {
					keep = false;
					break;
				}
				local->name = debug_locals[register_num].name;
				local->descriptor = debug_locals[register_num].descriptor;
				local->startAddress = debug_locals[register_num].startAddress;
				local->signature = debug_locals[register_num].signature;
				local->live = true;
				local->reg = register_num;
				local->endAddress = address;
				r_list_append (emitted_debug_locals, local);
			}
			debug_locals[register_num].name = getstr (bin, name_idx);
			debug_locals[register_num].descriptor = dex_type_descriptor (bin, type_idx);
			debug_locals[register_num].startAddress = address;
			debug_locals[register_num].signature = NULL;
			debug_locals[register_num].live = true;
			}
			break;
		case 0x4: //DBG_START_LOCAL_EXTENDED
			{
			ut64 register_num;
			ut64 name_idx;
			ut64 type_idx;
			ut64 sig_idx;
			p4 = r_uleb128 (p4, p4_end - p4, &register_num);
			p4 = r_uleb128 (p4, p4_end - p4, &name_idx);
			name_idx -= 1;
			p4 = r_uleb128 (p4, p4_end - p4, &type_idx);
			type_idx -= 1;
			p4 = r_uleb128 (p4, p4_end - p4, &sig_idx);
			sig_idx -= 1;
			if (register_num >= regsz) {
				r_list_free (debug_positions);
				r_list_free (params);
				r_list_free (emitted_debug_locals);
				return;
			}
			// Emit what was previously there, if anything
			// emitLocalCbIfLive
			if (debug_locals[register_num].live) {
				struct dex_debug_local_t *local = malloc (
					sizeof (struct dex_debug_local_t));
				if (!local) {
					keep = false;
					break;
				}
				local->name = debug_locals[register_num].name;
				local->descriptor = debug_locals[register_num].descriptor;
				local->startAddress = debug_locals[register_num].startAddress;
				local->signature = debug_locals[register_num].signature;
				local->live = true;
				local->reg = register_num;
				local->endAddress = address;
				r_list_append (emitted_debug_locals, local);
			}
			debug_locals[register_num].name = getstr (bin, name_idx);
			debug_locals[register_num].descriptor = dex_type_descriptor (bin, type_idx);
			debug_locals[register_num].startAddress = address;
			debug_locals[register_num].signature = getstr (bin, sig_idx);
			debug_locals[register_num].live = true;
			}
			break;
		case 0x5: // DBG_END_LOCAL
			{
			ut64 register_num;
			p4 = r_uleb128 (p4, p4_end - p4, &register_num);
			/* BUGFIX: register_num was used unchecked here, giving
			 * an out-of-bounds access on debug_locals[]. */
			if (register_num >= regsz) {
				r_list_free (debug_positions);
				r_list_free (params);
				r_list_free (emitted_debug_locals);
				return;
			}
			// emitLocalCbIfLive
			if (debug_locals[register_num].live) {
				struct dex_debug_local_t *local = malloc (
					sizeof (struct dex_debug_local_t));
				if (!local) {
					keep = false;
					break;
				}
				local->name = debug_locals[register_num].name;
				local->descriptor = debug_locals[register_num].descriptor;
				local->startAddress = debug_locals[register_num].startAddress;
				local->signature = debug_locals[register_num].signature;
				local->live = true;
				local->reg = register_num;
				local->endAddress = address;
				r_list_append (emitted_debug_locals, local);
			}
			debug_locals[register_num].live = false;
			}
			break;
		case 0x6: // DBG_RESTART_LOCAL
			{
			ut64 register_num;
			p4 = r_uleb128 (p4, p4_end - p4, &register_num);
			/* BUGFIX: same missing bounds check as DBG_END_LOCAL. */
			if (register_num >= regsz) {
				r_list_free (debug_positions);
				r_list_free (params);
				r_list_free (emitted_debug_locals);
				return;
			}
			if (!debug_locals[register_num].live) {
				debug_locals[register_num].startAddress = address;
				debug_locals[register_num].live = true;
			}
			}
			break;
		case 0x7: //DBG_SET_PROLOGUE_END
			break;
		case 0x8: //DBG_SET_PROLOGUE_BEGIN
			break;
		case 0x9: // DBG_SET_FILE
			{
			p4 = r_uleb128 (p4, p4_end - p4, &source_file_idx);
			source_file_idx--;
			}
			break;
		default:
			{
			/* Special opcodes >= 0x0a advance both address and line. */
			int adjusted_opcode = opcode - 0x0a;
			address += (adjusted_opcode / 15);
			line += -4 + (adjusted_opcode % 15);
			struct dex_debug_position_t *position =
				malloc (sizeof (struct dex_debug_position_t));
			if (!position) {
				keep = false;
				break;
			}
			position->source_file_idx = source_file_idx;
			position->address = address;
			position->line = line;
			r_list_append (debug_positions, position);
			}
			break;
		}
		/* BUGFIX: stop on a truncated stream instead of reading past
		 * the end of the buffer. */
		if (p4 >= p4_end) {
			break;
		}
		opcode = *(p4++) & 0xff;
	}

	if (!binfile->sdb_addrinfo) {
		binfile->sdb_addrinfo = sdb_new0 ();
	}

	char *fileline;
	char offset[64];
	char *offset_ptr;
	RListIter *iter1;
	struct dex_debug_position_t *pos;
	r_list_foreach (debug_positions, iter1, pos) {
		char *sfile = getstr (bin, pos->source_file_idx);
		fileline = r_str_newf ("%s|%"PFMT64d, sfile, pos->line);
		offset_ptr = sdb_itoa (pos->address + paddr, offset, 16);
		sdb_set (binfile->sdb_addrinfo, offset_ptr, fileline, 0);
		sdb_set (binfile->sdb_addrinfo, fileline, offset_ptr, 0);
		/* sdb_set() stores its own copies, so the temporaries can be
		 * released here (they used to leak once per line entry). */
		free (fileline);
		free (sfile);
	}

	if (!dexdump) {
		r_list_free (debug_positions);
		r_list_free (emitted_debug_locals);
		r_list_free (params);
		return;
	}

	RListIter *iter2;
	struct dex_debug_position_t *position;
	rbin->cb_printf ("      positions     :\n");
	r_list_foreach (debug_positions, iter2, position) {
		rbin->cb_printf ("        0x%04llx line=%llu\n",
				 position->address, position->line);
	}
	rbin->cb_printf ("      locals        :\n");

	RListIter *iter3;
	struct dex_debug_local_t *local;
	r_list_foreach (emitted_debug_locals, iter3, local) {
		if (local->signature) {
			rbin->cb_printf (
				"        0x%04x - 0x%04x reg=%d %s %s %s\n",
				local->startAddress, local->endAddress,
				local->reg, local->name, local->descriptor,
				local->signature);
		} else {
			rbin->cb_printf (
				"        0x%04x - 0x%04x reg=%d %s %s\n",
				local->startAddress, local->endAddress,
				local->reg, local->name, local->descriptor);
		}
	}

	/* Locals that are still live extend to the end of the insns. */
	for (reg = 0; reg < regsz; reg++) {
		if (debug_locals[reg].live) {
			if (debug_locals[reg].signature) {
				rbin->cb_printf (
					"        0x%04x - 0x%04x reg=%d %s %s "
					"%s\n",
					debug_locals[reg].startAddress,
					insns_size, reg, debug_locals[reg].name,
					debug_locals[reg].descriptor,
					debug_locals[reg].signature);
			} else {
				rbin->cb_printf (
					"        0x%04x - 0x%04x reg=%d %s %s"
					"\n",
					debug_locals[reg].startAddress,
					insns_size, reg, debug_locals[reg].name,
					debug_locals[reg].descriptor);
			}
		}
	}
	r_list_free (debug_positions);
	r_list_free (emitted_debug_locals);
	r_list_free (params);
}
static int check (RBinFile *arch);
static int check_bytes (const ut8 *buf, ut64 length);
/*
 * Return the key/value store of the loaded DEX object, or NULL when there
 * is no object (or no kv database) yet.
 */
static Sdb *get_sdb (RBinObject *o) {
	struct r_bin_dex_obj_t *dex;
	if (!o || !o->bin_obj) {
		return NULL;
	}
	dex = (struct r_bin_dex_obj_t *) o->bin_obj;
	return dex->kv;
}
/*
 * Parse "buf"/"sz" into a new r_bin_dex object.  The bytes are copied into
 * a temporary RBuffer which is released again before returning.
 * Returns the parsed object or NULL on bad input / allocation failure.
 */
static void *load_bytes(RBinFile *arch, const ut8 *buf, ut64 sz, ut64 loadaddr, Sdb *sdb){
	RBuffer *rbuf;
	void *dex = NULL;
	if (!buf || !sz || sz == UT64_MAX) {
		return NULL;
	}
	rbuf = r_buf_new ();
	if (rbuf) {
		r_buf_set_bytes (rbuf, buf, sz);
		dex = r_bin_dex_new_buf (rbuf);
		r_buf_free (rbuf);
	}
	return dex;
}
/*
 * Plugin entry: parse arch->buf into arch->o->bin_obj.
 * Returns true on success.
 */
static int load(RBinFile *arch) {
	if (!arch || !arch->o) {
		return false;
	}
	arch->o->bin_obj = load_bytes (arch, r_buf_buffer (arch->buf),
		r_buf_size (arch->buf), arch->o->loadaddr, arch->sdb);
	return arch->o->bin_obj != NULL;
}
/*
 * DEX files have no base address; everything is file-relative.
 */
static ut64 baddr(RBinFile *arch) {
	return 0;
}
/*
 * Plugin entry: run the magic check on the file's buffer.
 */
static int check(RBinFile *arch) {
	if (!arch) {
		return check_bytes (NULL, 0);
	}
	return check_bytes (r_buf_buffer (arch->buf), r_buf_size (arch->buf));
}
/*
 * Return true when "buf" looks like a DEX file.
 *
 * Known magics all share the "dex\n" prefix: "dex\n035\0" (standard),
 * "dex\n036\0" (extended/jumbo opcodes, ICS+ / sdk level 14+), plus the
 * pre-release M3/M5 magics.  The previous version listed some of these
 * explicitly — with the "M5" entry being a duplicate of the M3 magic
 * ("dex\n009\0" checked twice, dead code) — and then fell through to
 * accepting any "dex\n" prefix anyway, so a single prefix check is exactly
 * equivalent.
 */
static int check_bytes(const ut8 *buf, ut64 length) {
	if (!buf || length < 8) {
		return false;
	}
	return !memcmp (buf, "dex\n", 4);
}
/*
 * Build the RBinInfo metadata for a loaded DEX: type/arch strings plus the
 * SHA1 (header field at offset 12) and adler32 (offset 8) checksum entries;
 * the stored adler32 is compared against a recomputed one and a hint is
 * printed when they differ.
 */
static RBinInfo *info(RBinFile *arch) {
	RBinHash *h;
	RBinInfo *ret = R_NEW0 (RBinInfo);
	if (!ret) {
		return NULL;
	}
	ret->file = arch->file? strdup (arch->file): NULL;
	ret->type = strdup ("DEX CLASS");
	ret->has_va = false;
	ret->bclass = r_bin_dex_get_version (arch->o->bin_obj);
	ret->rclass = strdup ("class");
	ret->os = strdup ("linux");
	ret->subsystem = strdup ("any");
	ret->machine = strdup ("Dalvik VM");
	/* check_bytes() only guarantees 8 bytes; the header reads below need
	 * at least 32 (sha1 at 12..32, checksum at 8..12), so skip the hash
	 * entries for a truncated file (R_NEW0 left them zeroed). */
	if (arch->buf->length > 32) {
		h = &ret->sum[0];
		h->type = "sha1";
		h->len = 20;
		h->addr = 12;
		h->from = 12;
		h->to = arch->buf->length-32;
		memcpy (h->buf, arch->buf->buf+12, 20);
		h = &ret->sum[1];
		h->type = "adler32";
		h->len = 4;
		h->addr = 0x8;
		h->from = 12;
		h->to = arch->buf->length-h->from;
		/* BUGFIX: copy the stored checksum into the adler32 entry
		 * while "h" still points at sum[1]; previously this memcpy
		 * ran after "h" was moved to the sum[2] terminator, so the
		 * bytes landed in the wrong (terminating) entry. */
		memcpy (h->buf, arch->buf->buf + 8, 4);
		h = &ret->sum[2];
		h->type = 0;
		{
		/* NOTE(review): reading via a ut32* at +8 assumes the buffer
		 * is suitably aligned -- matches the old code; confirm. */
		ut32 *fc = (ut32 *)(arch->buf->buf + 8);
		ut32 cc = __adler32 (arch->buf->buf + 12, arch->buf->length - 12);
		if (*fc != cc) {
			eprintf ("# adler32 checksum doesn't match. Type this to fix it:\n");
			eprintf ("wx `#sha1 $s-32 @32` @12 ; wx `#adler32 $s-12 @12` @8\n");
		}
		}
	}
	ret->arch = strdup ("dalvik");
	ret->lang = "dalvik";
	ret->bits = 32;
	ret->big_endian = 0;
	ret->dbg_info = 0; //1 | 4 | 8; /* Stripped | LineNums | Syms */
	return ret;
}
/*
 * Build the list of RBinString entries from the DEX string table.  Every
 * table offset and length is validated against the file size before being
 * read; on any inconsistency the partial list is discarded and NULL is
 * returned.
 */
static RList *strings(RBinFile *arch) {
	struct r_bin_dex_obj_t *bin = NULL;
	RBinString *ptr = NULL;
	RList *ret = NULL;
	int i, len;
	ut8 buf[6];
	ut64 off;
	if (!arch || !arch->o) {
		return NULL;
	}
	bin = (struct r_bin_dex_obj_t *) arch->o->bin_obj;
	if (!bin || !bin->strings) {
		return NULL;
	}
	/* A string count larger than the whole file is corrupt.
	 * NOTE(review): NULLing bin->strings without freeing it looks like a
	 * leak unless the array is owned elsewhere -- confirm ownership. */
	if (bin->header.strings_size > bin->size) {
		bin->strings = NULL;
		return NULL;
	}
	if (!(ret = r_list_newf (free))) {
		return NULL;
	}
	for (i = 0; i < bin->header.strings_size; i++) {
		if (!(ptr = R_NEW0 (RBinString))) {
			break;
		}
		/* Need up to 6 bytes for the uleb128 length prefix. */
		if (bin->strings[i] > bin->size || bin->strings[i] + 6 > bin->size) {
			goto out_error;
		}
		r_buf_read_at (bin->b, bin->strings[i], (ut8*)&buf, 6);
		len = dex_read_uleb128 (buf);
		if (len > 1 && len < R_BIN_SIZEOF_STRINGS) {
			ptr->string = malloc (len + 1);
			if (!ptr->string) {
				goto out_error;
			}
			off = bin->strings[i] + dex_uleb128_len (buf);
			/* "off + len < len" catches wrap-around. */
			if (off + len >= bin->size || off + len < len) {
				free (ptr->string);
				goto out_error;
			}
			r_buf_read_at (bin->b, off, (ut8*)ptr->string, len);
			ptr->string[len] = 0;
			ptr->vaddr = ptr->paddr = bin->strings[i];
			ptr->size = len;
			ptr->length = len;
			ptr->ordinal = i+1;
			r_list_append (ret, ptr);
		} else {
			/* Too short or unreasonably long: skip the entry. */
			free (ptr);
		}
	}
	return ret;
out_error:
	r_list_free (ret);
	free (ptr);
	return NULL;
}
/*
 * Return the (heap-allocated) name of method "idx", or NULL for an
 * out-of-range index.
 */
static char *dex_method_name(RBinDexObj *bin, int idx) {
	if (idx < 0 || idx >= bin->header.method_size) {
		return NULL;
	}
	/* class_id indexes the type table, so validate it against types_size
	 * (the old code compared it against strings_size, inconsistent with
	 * dex_method_fullname).  The value itself is only sanity-checked. */
	int cid = bin->methods[idx].class_id;
	if (cid < 0 || cid >= bin->header.types_size) {
		return NULL;
	}
	/* name_id does index the string table. */
	int tid = bin->methods[idx].name_id;
	if (tid < 0 || tid >= bin->header.strings_size) {
		return NULL;
	}
	return getstr (bin, tid);
}
/*
 * Return the descriptor string of class/type "cid", or NULL when the table
 * is missing or the index is out of range.  Caller frees the result.
 */
static char *dex_class_name_byid(RBinDexObj *bin, int cid) {
	if (!bin || !bin->types) {
		return NULL;
	}
	if (cid < 0 || cid >= bin->header.types_size) {
		return NULL;
	}
	return getstr (bin, bin->types[cid].descriptor_id);
}
/*
 * A class name is simply the descriptor of its class_id type entry.
 */
static char *dex_class_name(RBinDexObj *bin, RBinDexClass *c) {
	int cid = c->class_id;
	return dex_class_name_byid (bin, cid);
}
/*
 * Build "ClassDescriptor->name typeDescriptor" for field index "fid".
 * Returns a heap string, or NULL for an out-of-range index.
 */
static char *dex_field_name(RBinDexObj *bin, int fid) {
	int cid, tid, type_id;
	if (!bin || !bin->fields) {
		return NULL;
	}
	if (fid < 0 || fid >= bin->header.fields_size) {
		return NULL;
	}
	cid = bin->fields[fid].class_id;
	if (cid < 0 || cid >= bin->header.types_size) {
		return NULL;
	}
	type_id = bin->fields[fid].type_id;
	if (type_id < 0 || type_id >= bin->header.types_size) {
		return NULL;
	}
	tid = bin->fields[fid].name_id;
	/* NOTE(review): the three getstr() results may be NULL (passed to %s)
	 * and are never freed -- relies on r_str_newf tolerating NULL and
	 * leaks the temporaries; confirm and consider capturing/freeing. */
	return r_str_newf ("%s->%s %s", getstr (bin, bin->types[cid].descriptor_id),
		getstr (bin, tid), getstr (bin, bin->types[type_id].descriptor_id));
}
/* Build "class.name(signature)" for method #method_idx. Returns a newly
 * allocated string, or NULL when ids are out of range or the name/class
 * cannot be resolved. */
static char *dex_method_fullname(RBinDexObj *bin, int method_idx) {
	if (!bin || !bin->types) {
		return NULL;
	}
	if (method_idx < 0 || method_idx >= bin->header.method_size) {
		return NULL;
	}
	int cid = bin->methods[method_idx].class_id;
	if (cid < 0 || cid >= bin->header.types_size) {
		return NULL;
	}
	char *name = dex_method_name (bin, method_idx);
	if (!name) {
		// FIX: NULL was previously passed straight into r_str_newf's
		// "%s" (undefined behavior per the C standard)
		return NULL;
	}
	char *class_name = dex_class_name_byid (bin, cid);
	if (!class_name) {
		free (name);
		return NULL;
	}
	class_name = r_str_replace (class_name, ";", "", 0); //TODO: move to func
	char *signature = dex_method_signature (bin, method_idx);
	// signature may legitimately be NULL; substitute "" for formatting
	char *flagname = r_str_newf ("%s.%s%s", class_name, name,
		signature? signature: "");
	free (name);
	free (class_name);
	free (signature);
	return flagname;
}
/* File offset of type_ids[type_idx] (each entry is 4 bytes), or 0 when
 * the index or the object state is invalid. */
static ut64 dex_get_type_offset(RBinFile *arch, int type_idx) {
	if (!arch || !arch->o) {
		// FIX: arch->o was dereferenced without a NULL check
		return 0;
	}
	RBinDexObj *bin = (RBinDexObj*) arch->o->bin_obj;
	if (!bin || !bin->types) {
		return 0;
	}
	if (type_idx < 0 || type_idx >= bin->header.types_size) {
		return 0;
	}
	return bin->header.types_offset + type_idx * 0x04; //&bin->types[type_idx];
}
/* RListFree callback for classes_list: releases the per-class method and
 * field lists before freeing the class itself. */
static void __r_bin_class_free(RBinClass *p) {
	if (!p) {
		// FIX: list-free callbacks may receive NULL entries
		return;
	}
	r_list_free (p->methods);
	r_list_free (p->fields);
	r_bin_class_free (p);
}
/* Descriptor string of c's superclass, or NULL when the superclass type
 * id is out of range or inputs are missing. */
static char *dex_class_super_name(RBinDexObj *bin, RBinDexClass *c) {
	if (!bin || !c || !bin->types) {
		return NULL;
	}
	int super_id = c->super_class;
	if (super_id < 0 || super_id >= bin->header.types_size) {
		return NULL;
	}
	return getstr (bin, bin->types[super_id].descriptor_id);
}
/* Parse fields_count encoded_field entries of one class_data_item.
 *
 * Each entry is two ULEB128 values: field_idx_diff (delta-encoded against
 * the previous entry, accumulated via lastIndex) and access_flags. The
 * referenced field_ids entry is re-read from the file to recover class_id,
 * type_id and name_id. One RBinSymbol is appended to bin->methods_list and
 * cls->fields per field; is_sfield selects the "sfield"/STATIC vs
 * "ifield"/FIELD naming. Returns the advanced read pointer. */
static const ut8 *parse_dex_class_fields(RBinFile *binfile, RBinDexObj *bin,
	RBinDexClass *c, RBinClass *cls,
	const ut8 *p, const ut8 *p_end,
	int *sym_count, ut64 fields_count,
	bool is_sfield) {
	struct r_bin_t *rbin = binfile->rbin;
	ut64 lastIndex = 0;
	ut8 ff[sizeof (DexField)] = {0};
	int total, i, tid;
	DexField field;
	const char* type_str;
	for (i = 0; i < fields_count; i++) {
		ut64 fieldIndex, accessFlags;
		p = r_uleb128 (p, p_end - p, &fieldIndex); // fieldIndex
		p = r_uleb128 (p, p_end - p, &accessFlags); // accessFlags
		// field indices are delta-encoded; reconstruct the absolute index
		fieldIndex += lastIndex;
		total = bin->header.fields_offset + (sizeof (DexField) * fieldIndex);
		// bail out when the field_ids entry would fall outside the file
		// (second test catches signed-int wraparound)
		if (total >= bin->size || total < bin->header.fields_offset) {
			break;
		}
		if (r_buf_read_at (binfile->buf, total, ff,
				sizeof (DexField)) != sizeof (DexField)) {
			break;
		}
		field.class_id = r_read_le16 (ff);
		field.type_id = r_read_le16 (ff + 2);
		field.name_id = r_read_le32 (ff + 4);
		char *fieldName = getstr (bin, field.name_id);
		if (field.type_id >= bin->header.types_size) {
			break;
		}
		tid = bin->types[field.type_id].descriptor_id;
		type_str = getstr (bin, tid);
		// NOTE(review): R_NEW0 result is not checked before use — on OOM
		// this dereferences NULL; confirm before changing the flow.
		RBinSymbol *sym = R_NEW0 (RBinSymbol);
		if (is_sfield) {
			sym->name = r_str_newf ("%s.sfield_%s:%s", cls->name,
				fieldName, type_str);
			sym->type = r_str_const ("STATIC");
		} else {
			sym->name = r_str_newf ("%s.ifield_%s:%s", cls->name,
				fieldName, type_str);
			sym->type = r_str_const ("FIELD");
		}
		sym->name = r_str_replace (sym->name, "method.", "", 0);
		//sym->name = r_str_replace (sym->name, ";", "", 0);
		sym->paddr = sym->vaddr = total;
		sym->ordinal = (*sym_count)++;
		if (dexdump) {
			const char *accessStr = createAccessFlagStr (
				accessFlags, kAccessForField);
			rbin->cb_printf (" #%d : (in %s;)\n", i,
				cls->name);
			rbin->cb_printf (" name : '%s'\n", fieldName);
			rbin->cb_printf (" type : '%s'\n", type_str);
			rbin->cb_printf (" access : 0x%04x (%s)\n",
				(unsigned int)accessFlags, accessStr);
		}
		r_list_append (bin->methods_list, sym);
		r_list_append (cls->fields, sym);
		lastIndex = fieldIndex;
	}
	return p;
}
// TODO: refactor this method
// XXX it needs a lot of love!!!
/* Parse DM encoded_method entries of one class_data_item.
 *
 * Each entry is three ULEB128 values: method_idx_diff (delta-encoded,
 * accumulated in omi), access_flags (MA) and code_off (MC). For every
 * method an RBinSymbol is appended to bin->methods_list and cls->methods,
 * and methods[MI] is marked so dex_loadcode can later tell local methods
 * from imports. When MC > 0 the code_item header is read to derive the
 * symbol's address/size, and for dexdump the try/catch handler tables are
 * walked and printed. Returns the advanced read pointer.
 * NOTE: is_direct is kept for signature compatibility but is unused. */
static const ut8 *parse_dex_class_method(RBinFile *binfile, RBinDexObj *bin,
	RBinDexClass *c, RBinClass *cls,
	const ut8 *p, const ut8 *p_end,
	int *sym_count, ut64 DM, int *methods,
	bool is_direct) {
	struct r_bin_t *rbin = binfile->rbin;
	ut8 ff2[16] = {0};
	ut8 ff3[8] = {0};
	int i;
	ut64 omi = 0;
	bool catchAll;
	ut16 regsz, ins_size, outs_size, tries_size;
	ut16 handler_off, start_addr, insn_count;
	ut32 debug_info_off, insns_size;
	const ut8 *encoded_method_addr;
	for (i = 0; i < DM; i++) {
		encoded_method_addr = p;
		char *method_name, *flag_name;
		ut64 MI, MA, MC;
		// method index is delta-encoded against the previous entry
		p = r_uleb128 (p, p_end - p, &MI);
		MI += omi;
		omi = MI;
		p = r_uleb128 (p, p_end - p, &MA);
		p = r_uleb128 (p, p_end - p, &MC);
		// TODO: MOVE CHECKS OUTSIDE!
		if (MI < bin->header.method_size) {
			if (methods) {
				methods[MI] = 1;
			}
		}
		method_name = dex_method_name (bin, MI);
		char *signature = dex_method_signature (bin, MI);
		if (!method_name) {
			method_name = strdup ("unknown");
		}
		flag_name = r_str_newf ("%s.method.%s%s", cls->name,
			method_name, signature);
		if (!flag_name) {
			R_FREE (method_name);
			R_FREE (signature);
			continue;
		}
		// TODO: check size
		// ut64 prolog_size = 2 + 2 + 2 + 2 + 4 + 4;
		ut64 v2, handler_type, handler_addr;
		int t;
		if (MC > 0) {
			// TODO: parse debug info
			// XXX why binfile->buf->base???
			// reject code offsets whose 16-byte code_item header would
			// overflow the file (second test catches ut64 wraparound)
			if (MC + 16 >= bin->size || MC + 16 < MC) {
				R_FREE (method_name);
				R_FREE (flag_name);
				R_FREE (signature);
				continue;
			}
			if (r_buf_read_at (binfile->buf,
					binfile->buf->base + MC, ff2,
					16) < 1) {
				R_FREE (method_name);
				R_FREE (flag_name);
				R_FREE (signature);
				continue;
			}
			// code_item header fields (little endian)
			regsz = r_read_le16 (ff2);
			ins_size = r_read_le16 (ff2 + 2);
			outs_size = r_read_le16 (ff2 + 4);
			tries_size = r_read_le16 (ff2 + 6);
			debug_info_off = r_read_le32 (ff2 + 8);
			insns_size = r_read_le32 (ff2 + 12);
			int padd = 0;
			// tries[] is 4-byte aligned after the 16-bit insns array
			if (tries_size > 0 && insns_size % 2) {
				padd = 2;
			}
			// t = byte offset of tries[] relative to the code_item
			t = 16 + 2 * insns_size + padd;
		}
		if (dexdump) {
			const char* accessStr = createAccessFlagStr (MA, kAccessForMethod);
			rbin->cb_printf (" #%d : (in %s;)\n", i, cls->name);
			rbin->cb_printf (" name : '%s'\n", method_name);
			rbin->cb_printf (" type : '%s'\n", signature);
			rbin->cb_printf (" access : 0x%04x (%s)\n",
				(unsigned int)MA, accessStr);
		}
		if (MC > 0) {
			if (dexdump) {
				rbin->cb_printf (" code -\n");
				rbin->cb_printf (" registers : %d\n", regsz);
				rbin->cb_printf (" ins : %d\n", ins_size);
				rbin->cb_printf (" outs : %d\n", outs_size);
				rbin->cb_printf (
					" insns size : %d 16-bit code "
					"units\n",
					insns_size);
			}
			if (tries_size > 0) {
				if (dexdump) {
					rbin->cb_printf (" catches : %d\n", tries_size);
				}
				int j, m = 0;
				//XXX bucle controlled by tainted variable it could produces huge loop
				for (j = 0; j < tries_size; ++j) {
					// each try_item is 8 bytes
					ut64 offset = MC + t + j * 8;
					if (offset >= bin->size || offset < MC) {
						R_FREE (signature);
						break;
					}
					if (r_buf_read_at (
						binfile->buf,
						binfile->buf->base + offset,
						ff3, 8) < 1) {
						// free (method_name);
						R_FREE (signature);
						break;
					}
					start_addr = r_read_le32 (ff3);
					insn_count = r_read_le16 (ff3 + 4);
					handler_off = r_read_le16 (ff3 + 6);
					char* s = NULL;
					if (dexdump) {
						rbin->cb_printf (
							" 0x%04x - "
							"0x%04x\n",
							start_addr,
							(start_addr +
							insn_count));
					}
					const ut8 *p3, *p3_end;
					//XXX tries_size is tainted and oob here
					int off = MC + t + tries_size * 8 + handler_off;
					if (off >= bin->size || off < tries_size) {
						R_FREE (signature);
						break;
					}
					p3 = r_buf_get_at (binfile->buf, off, NULL);
					p3_end = p3 + binfile->buf->length - off;
					// negative size = handler list ends with a catch-all
					st64 size = r_sleb128 (&p3, p3_end);
					if (size <= 0) {
						catchAll = true;
						size = -size;
					} else {
						catchAll = false;
					}
					for (m = 0; m < size; m++) {
						p3 = r_uleb128 (p3, p3_end - p3, &handler_type);
						p3 = r_uleb128 (p3, p3_end - p3, &handler_addr);
						if (handler_type > 0 &&
							handler_type <
							bin->header.types_size) {
							s = getstr (bin, bin->types[handler_type].descriptor_id);
							if (dexdump) {
								rbin->cb_printf (
									" %s "
									"-> 0x%04llx\n",
									s,
									handler_addr);
							}
						} else {
							if (dexdump) {
								rbin->cb_printf (
									" "
									"(error) -> "
									"0x%04llx\n",
									handler_addr);
							}
						}
					}
					if (catchAll) {
						p3 = r_uleb128 (p3, p3_end - p3, &v2);
						if (dexdump) {
							rbin->cb_printf (
								" "
								"<any> -> "
								"0x%04llx\n",
								v2);
						}
					}
				}
			} else {
				if (dexdump) {
					rbin->cb_printf (
						" catches : "
						"(none)\n");
				}
			}
		} else {
			if (dexdump) {
				rbin->cb_printf (
					" code : (none)\n");
			}
		}
		if (*flag_name) {
			// NOTE(review): R_NEW0 result unchecked before use (OOM risk)
			RBinSymbol *sym = R_NEW0 (RBinSymbol);
			sym->name = flag_name;
			// is_direct is no longer used
			// if method has code *addr points to code
			// otherwise it points to the encoded method
			if (MC > 0) {
				sym->type = r_str_const ("FUNC");
				sym->paddr = MC;// + 0x10;
				sym->vaddr = MC;// + 0x10;
			} else {
				sym->type = r_str_const ("METH");
				sym->paddr = encoded_method_addr - binfile->buf->buf;
				sym->vaddr = encoded_method_addr - binfile->buf->buf;
			}
			// ACC_PUBLIC bit decides the binding
			if ((MA & 0x1) == 0x1) {
				sym->bind = r_str_const ("GLOBAL");
			} else {
				sym->bind = r_str_const ("LOCAL");
			}
			sym->ordinal = (*sym_count)++;
			if (MC > 0) {
				// re-read the code_item header to size the symbol
				if (r_buf_read_at (binfile->buf, binfile->buf->base + MC, ff2, 16) < 1) {
					R_FREE (sym);
					R_FREE (signature);
					continue;
				}
				//ut16 regsz = r_read_le16 (ff2);
				//ut16 ins_size = r_read_le16 (ff2 + 2);
				//ut16 outs_size = r_read_le16 (ff2 + 4);
				ut16 tries_size = r_read_le16 (ff2 + 6);
				//ut32 debug_info_off = r_read_le32 (ff2 + 8);
				ut32 insns_size = r_read_le32 (ff2 + 12);
				ut64 prolog_size = 2 + 2 + 2 + 2 + 4 + 4;
				if (tries_size > 0) {
					//prolog_size += 2 + 8*tries_size; // we need to parse all so the catch info...
				}
				// TODO: prolog_size
				sym->paddr = MC + prolog_size;// + 0x10;
				sym->vaddr = MC + prolog_size;// + 0x10;
				//if (is_direct) {
				sym->size = insns_size * 2;
				//}
				//eprintf("%s (0x%x-0x%x) size=%d\nregsz=%d\ninsns_size=%d\nouts_size=%d\ntries_size=%d\ninsns_size=%d\n", flag_name, sym->vaddr, sym->vaddr+sym->size, prolog_size, regsz, ins_size, outs_size, tries_size, insns_size);
				r_list_append (bin->methods_list, sym);
				r_list_append (cls->methods, sym);
				// track the [code_from, code_to) range for sections()
				if (bin->code_from > sym->paddr) {
					bin->code_from = sym->paddr;
				}
				if (bin->code_to < sym->paddr) {
					bin->code_to = sym->paddr;
				}
				if (!mdb) {
					mdb = sdb_new0 ();
				}
				sdb_num_set (mdb, sdb_fmt (0, "method.%d", MI), sym->paddr, 0);
				// -----------------
				// WORK IN PROGRESS
				// -----------------
				if (0) {
					if (MA & 0x10000) { //ACC_CONSTRUCTOR
						if (!cdb) {
							cdb = sdb_new0 ();
						}
						sdb_num_set (cdb, sdb_fmt (0, "%d", c->class_id), sym->paddr, 0);
					}
				}
			} else {
				sym->size = 0;
				r_list_append (bin->methods_list, sym);
				r_list_append (cls->methods, sym);
			}
			// debug_info_off was filled from the code_item header above;
			// only valid when MC > 0
			if (MC > 0 && debug_info_off > 0 && bin->header.data_offset < debug_info_off &&
				debug_info_off < bin->header.data_offset + bin->header.data_size) {
				dex_parse_debug_item (binfile, bin, c, MI, MA, sym->paddr, ins_size,
					insns_size, cls->name, regsz, debug_info_off);
			} else if (MC > 0) {
				if (dexdump) {
					rbin->cb_printf (" positions :\n");
					rbin->cb_printf (" locals :\n");
				}
			}
		} else {
			R_FREE (flag_name);
		}
		R_FREE (signature);
		R_FREE (method_name);
	}
	return p;
}
/* Parse one class_def entry: creates an RBinClass (appended to
 * bin->classes_list, which owns it from then on), dumps interfaces when
 * dexdump is on, and walks the class_data_item (static/instance fields,
 * direct/virtual methods) through the parse_dex_class_* helpers. */
static void parse_class(RBinFile *binfile, RBinDexObj *bin, RBinDexClass *c,
	int class_index, int *methods, int *sym_count) {
	struct r_bin_t *rbin = binfile->rbin;
	char *class_name;
	int z;
	const ut8 *p, *p_end;
	if (!c) {
		return;
	}
	class_name = dex_class_name (bin, c);
	class_name = r_str_replace (class_name, ";", "", 0); //TODO: move to func
	if (!class_name || !*class_name) {
		free (class_name); // FIX: a non-NULL empty string leaked here
		return;
	}
	RBinClass *cls = R_NEW0 (RBinClass);
	if (!cls) {
		free (class_name);
		return;
	}
	cls->name = class_name; // ownership of class_name moves to cls
	cls->index = class_index;
	cls->addr = bin->header.class_offset + class_index * DEX_CLASS_SIZE;
	cls->methods = r_list_new ();
	if (!cls->methods) {
		free (class_name); // FIX: leaked on this path before
		free (cls);
		return;
	}
	cls->fields = r_list_new ();
	if (!cls->fields) {
		r_list_free (cls->methods);
		free (class_name); // FIX: leaked on this path before
		free (cls);
		return;
	}
	// from here on classes_list owns cls (freed via __r_bin_class_free)
	r_list_append (bin->classes_list, cls);
	if (dexdump) {
		rbin->cb_printf (" Class descriptor : '%s;'\n", class_name);
		rbin->cb_printf (
			" Access flags : 0x%04x (%s)\n", c->access_flags,
			createAccessFlagStr (c->access_flags, kAccessForClass));
		rbin->cb_printf (" Superclass : '%s'\n",
			dex_class_super_name (bin, c));
		rbin->cb_printf (" Interfaces -\n");
	}
	if (c->interfaces_offset > 0 &&
		bin->header.data_offset < c->interfaces_offset &&
		c->interfaces_offset <
			bin->header.data_offset + bin->header.data_size) {
		p = r_buf_get_at (binfile->buf, c->interfaces_offset, NULL);
		if (!p) {
			// FIX: r_buf_get_at can return NULL; was dereferenced blindly
			return;
		}
		int types_list_size = r_read_le32 (p);
		if (types_list_size < 0 || types_list_size >= bin->header.types_size ) {
			return;
		}
		for (z = 0; z < types_list_size; z++) {
			int t = r_read_le16 (p + 4 + z * 2);
			if (t > 0 && t < bin->header.types_size ) {
				int tid = bin->types[t].descriptor_id;
				if (dexdump) {
					rbin->cb_printf (
						" #%d : '%s'\n",
						z, getstr (bin, tid));
				}
			}
		}
	}
	// TODO: this is quite ugly
	if (!c->class_data_offset) {
		if (dexdump) {
			rbin->cb_printf (
				" Static fields -\n Instance fields "
				"-\n Direct methods -\n Virtual methods "
				"-\n");
		}
	} else {
		// TODO: move to func, def or inline
		// class_data_offset => [class_offset, class_defs_off+class_defs_size*32]
		if (bin->header.class_offset > c->class_data_offset ||
			c->class_data_offset <
				bin->header.class_offset +
					bin->header.class_size * DEX_CLASS_SIZE) {
			return;
		}
		p = r_buf_get_at (binfile->buf, c->class_data_offset, NULL);
		if (!p) {
			// FIX: r_buf_get_at can return NULL; was dereferenced blindly
			return;
		}
		p_end = p + binfile->buf->length - c->class_data_offset;
		c->class_data = (struct dex_class_data_item_t *)malloc (
			sizeof (struct dex_class_data_item_t));
		if (!c->class_data) {
			// FIX: the "XXX check for NULL!!" — OOM dereferenced NULL here
			return;
		}
		p = r_uleb128 (p, p_end - p, &c->class_data->static_fields_size);
		p = r_uleb128 (p, p_end - p, &c->class_data->instance_fields_size);
		p = r_uleb128 (p, p_end - p, &c->class_data->direct_methods_size);
		p = r_uleb128 (p, p_end - p, &c->class_data->virtual_methods_size);
		if (dexdump) {
			rbin->cb_printf (" Static fields -\n");
		}
		p = parse_dex_class_fields (
			binfile, bin, c, cls, p, p_end, sym_count,
			c->class_data->static_fields_size, true);
		if (dexdump) {
			rbin->cb_printf (" Instance fields -\n");
		}
		p = parse_dex_class_fields (
			binfile, bin, c, cls, p, p_end, sym_count,
			c->class_data->instance_fields_size, false);
		if (dexdump) {
			rbin->cb_printf (" Direct methods -\n");
		}
		p = parse_dex_class_method (
			binfile, bin, c, cls, p, p_end, sym_count,
			c->class_data->direct_methods_size, methods, true);
		if (dexdump) {
			rbin->cb_printf (" Virtual methods -\n");
		}
		p = parse_dex_class_method (
			binfile, bin, c, cls, p, p_end, sym_count,
			c->class_data->virtual_methods_size, methods, false);
	}
	if (dexdump) {
		char *source_file = getstr (bin, c->source_file);
		if (!source_file) {
			rbin->cb_printf (
				" source_file_idx : %d (unknown)\n\n",
				c->source_file);
		} else {
			rbin->cb_printf (" source_file_idx : %d (%s)\n\n",
				c->source_file, source_file);
		}
	}
	// class_name is NOT freed here: cls->name owns it and it is released
	// together with the class via the classes_list free callback.
}
/* True when class_idx is the class_id of any parsed class_def entry. */
static bool is_class_idx_in_code_classes(RBinDexObj *bin, int class_idx) {
	int j;
	const int n = bin->header.class_size;
	for (j = 0; j < n; j++) {
		if (bin->classes[j].class_id == class_idx) {
			return true;
		}
	}
	return false;
}
/* Populate bin->methods_list, bin->imports_list and bin->classes_list.
 * First pass walks every class_def (parse_class marks locally-defined
 * method ids in methods[]); second pass turns every unmarked method id
 * into an import plus an "imp." symbol. Returns true on success; no-op
 * (false) when the lists already exist. */
static int dex_loadcode(RBinFile *arch, RBinDexObj *bin) {
	struct r_bin_t *rbin = arch->rbin;
	int i;
	int *methods = NULL;
	int sym_count = 0;
	// doublecheck??
	if (!bin || bin->methods_list) {
		return false;
	}
	bin->code_from = UT64_MAX;
	bin->code_to = 0;
	bin->methods_list = r_list_newf ((RListFree)free);
	if (!bin->methods_list) {
		return false;
	}
	bin->imports_list = r_list_newf ((RListFree)free);
	if (!bin->imports_list) {
		r_list_free (bin->methods_list);
		return false;
	}
	bin->classes_list = r_list_newf ((RListFree)__r_bin_class_free);
	if (!bin->classes_list) {
		r_list_free (bin->methods_list);
		r_list_free (bin->imports_list);
		return false;
	}
	if (bin->header.method_size > bin->size) {
		bin->header.method_size = 0;
		return false;
	}
	/* WrapDown the header sizes to avoid huge allocations */
	bin->header.method_size = R_MIN (bin->header.method_size, bin->size);
	bin->header.class_size = R_MIN (bin->header.class_size, bin->size);
	bin->header.strings_size = R_MIN (bin->header.strings_size, bin->size);
	// TODO: is this posible after R_MIN ??
	if (bin->header.strings_size > bin->size) {
		eprintf ("Invalid strings size\n");
		return false;
	}
	if (bin->classes) {
		ut64 amount = sizeof (int) * bin->header.method_size;
		if (amount > UT32_MAX || amount < bin->header.method_size) {
			return false;
		}
		methods = calloc (1, amount + 1);
		if (!methods) {
			// FIX: allocation was unchecked and later dereferenced
			return false;
		}
		for (i = 0; i < bin->header.class_size; i++) {
			char *super_name, *class_name;
			struct dex_class_t *c = &bin->classes[i];
			class_name = dex_class_name (bin, c);
			super_name = dex_class_super_name (bin, c);
			if (dexdump) {
				rbin->cb_printf ("Class #%d -\n", i);
			}
			parse_class (arch, bin, c, i, methods, &sym_count);
			free (class_name);
			free (super_name);
		}
	}
	if (methods) {
		int import_count = 0;
		// shadows the outer sym_count on purpose: import ordinals
		// continue after the symbols created by parse_class
		int sym_count = bin->methods_list->length;
		for (i = 0; i < bin->header.method_size; i++) {
			int len = 0;
			if (methods[i]) {
				continue; // defined locally, not an import
			}
			if (bin->methods[i].class_id >= bin->header.types_size) {
				continue;
			}
			if (is_class_idx_in_code_classes (bin, bin->methods[i].class_id)) {
				continue;
			}
			char *class_name = getstr (
				bin, bin->types[bin->methods[i].class_id]
					.descriptor_id);
			if (!class_name) {
				continue; // FIX: redundant free (NULL) removed
			}
			len = strlen (class_name);
			if (len < 1) {
				free (class_name); // FIX: was leaked on this path
				continue;
			}
			class_name[len - 1] = 0; // remove last char ";"
			char *method_name = dex_method_name (bin, i);
			char *signature = dex_method_signature (bin, i);
			if (method_name && *method_name) {
				RBinImport *imp = R_NEW0 (RBinImport);
				imp->name = r_str_newf ("%s.method.%s%s", class_name, method_name, signature);
				imp->type = r_str_const ("FUNC");
				imp->bind = r_str_const ("NONE");
				imp->ordinal = import_count++;
				r_list_append (bin->imports_list, imp);
				RBinSymbol *sym = R_NEW0 (RBinSymbol);
				sym->name = r_str_newf ("imp.%s", imp->name);
				sym->type = r_str_const ("FUNC");
				sym->bind = r_str_const ("NONE");
				//XXX so damn unsafe check buffer boundaries!!!!
				//XXX use r_buf API!!
				sym->paddr = sym->vaddr = bin->b->base + bin->header.method_offset + (sizeof (struct dex_method_t) * i) ;
				sym->ordinal = sym_count++;
				r_list_append (bin->methods_list, sym);
				if (!mdb) {
					// FIX: mdb stays NULL when no method carried
					// code; sdb_num_set would have hit NULL
					mdb = sdb_new0 ();
				}
				sdb_num_set (mdb, sdb_fmt (0, "method.%d", i), sym->paddr, 0);
			}
			free (method_name);
			free (signature);
			free (class_name);
		}
		free (methods);
	}
	return true;
}
/* Lazily build and return the import list (owned by bin; do not free). */
static RList* imports(RBinFile *arch) {
	if (!arch || !arch->o) {
		// FIX: arch->o was dereferenced without a check, unlike the
		// sibling methods()/classes() accessors
		return NULL;
	}
	RBinDexObj *bin = (RBinDexObj*) arch->o->bin_obj;
	if (!bin) {
		return NULL;
	}
	if (bin->imports_list) {
		return bin->imports_list;
	}
	dex_loadcode (arch, bin);
	return bin->imports_list;
}
/* Lazily build and return the symbol list (owned by bin; do not free). */
static RList *methods(RBinFile *arch) {
	RBinDexObj *dex = NULL;
	if (arch && arch->o) {
		dex = (RBinDexObj *) arch->o->bin_obj;
	}
	if (!dex) {
		return NULL;
	}
	if (!dex->methods_list) {
		dex_loadcode (arch, dex);
	}
	return dex->methods_list;
}
/* Lazily build and return the class list (owned by bin; do not free). */
static RList *classes(RBinFile *arch) {
	RBinDexObj *dex = NULL;
	if (arch && arch->o) {
		dex = (RBinDexObj *) arch->o->bin_obj;
	}
	if (!dex) {
		return NULL;
	}
	if (!dex->classes_list) {
		dex_loadcode (arch, dex);
	}
	return dex->classes_list;
}
/* Return 1 when an entrypoint with this vaddr is already in the list. */
static int already_entry(RList *entries, ut64 vaddr) {
	RListIter *it;
	RBinAddr *entry;
	r_list_foreach (entries, it, entry) {
		if (vaddr == entry->vaddr) {
			return 1;
		}
	}
	return 0;
}
/* Guess program entrypoints, in order of preference:
 *  1. any GLOBAL "...onCreate(Landroid/os/Bundle;)V" method,
 *  2. any "...main([Ljava/lang/String;)V" method,
 *  3. fallback: the lowest code address (bin->code_from).
 * Returns a new list owned by the caller. */
static RList *entries(RBinFile *arch) {
	RListIter *iter;
	RBinDexObj *bin;
	RBinSymbol *m;
	RBinAddr *ptr;
	RList *ret;
	if (!arch || !arch->o || !arch->o->bin_obj) {
		return NULL;
	}
	bin = (RBinDexObj*) arch->o->bin_obj;
	ret = r_list_newf ((RListFree)free);
	if (!ret) {
		// FIX: allocation failure was not handled before appending
		return NULL;
	}
	if (!bin->methods_list) {
		dex_loadcode (arch, bin);
	}
	// STEP 1. ".onCreate(Landroid/os/Bundle;)V"
	r_list_foreach (bin->methods_list, iter, m) {
		if (strlen (m->name) > 30 && m->bind &&
			!strcmp(m->bind, "GLOBAL") &&
			!strcmp (m->name + strlen (m->name) - 31,
				".onCreate(Landroid/os/Bundle;)V")) {
			if (!already_entry (ret, m->paddr)) {
				if ((ptr = R_NEW0 (RBinAddr))) {
					ptr->paddr = ptr->vaddr = m->paddr;
					r_list_append (ret, ptr);
				}
			}
		}
	}
	// STEP 2. ".main([Ljava/lang/String;)V"
	if (r_list_empty (ret)) {
		r_list_foreach (bin->methods_list, iter, m) {
			if (strlen (m->name) > 26 &&
				!strcmp (m->name + strlen (m->name) - 27,
					".main([Ljava/lang/String;)V")) {
				if (!already_entry (ret, m->paddr)) {
					if ((ptr = R_NEW0 (RBinAddr))) {
						ptr->paddr = ptr->vaddr = m->paddr;
						r_list_append (ret, ptr);
					}
				}
			}
		}
	}
	// STEP 3. NOTHING FOUND POINT TO CODE_INIT
	if (r_list_empty (ret)) {
		if (!already_entry (ret, bin->code_from)) {
			ptr = R_NEW0 (RBinAddr);
			if (ptr) {
				ptr->paddr = ptr->vaddr = bin->code_from;
				r_list_append (ret, ptr);
			}
		}
	}
	return ret;
}
/* File offset of method #idx as recorded in the mdb sdb during
 * dex_loadcode; 0 when unknown. arch/dex are kept for signature
 * compatibility with the other getoffset helpers. */
static ut64 offset_of_method_idx(RBinFile *arch, struct r_bin_dex_obj_t *dex, int idx) {
	// FIX: removed a dead store — the arithmetic fallback computed from
	// dex->header.method_offset was always overwritten by the sdb lookup.
	return (ut64) sdb_num_get (mdb, sdb_fmt (0, "method.%d", idx), 0);
}
// TODO: change all return type for all getoffset
/* Resolve the file offset of object #idx of the given kind:
 * 'm' method, 's' string, 't' type, 'c' class, 'o' object (unsupported).
 * Returns -1 when unknown. */
static int getoffset(RBinFile *arch, int type, int idx) {
	struct r_bin_dex_obj_t *dex = arch->o->bin_obj;
	switch (type) {
	case 'm': // methods
		// TODO: ADD CHECK
		return offset_of_method_idx (arch, dex, idx);
	case 'o': // objects
		break;
	case 's': // strings
		// FIX: also reject negative idx — previously a negative index
		// passed the size check and read out of bounds
		if (idx >= 0 && idx < dex->header.strings_size) {
			if (dex->strings) {
				return dex->strings[idx];
			}
		}
		break;
	case 't': // type
		return dex_get_type_offset (arch, idx);
	case 'c': // class
		return dex_get_type_offset (arch, idx);
		//return sdb_num_get (cdb, sdb_fmt (0, "%d", idx), 0);
	}
	return -1;
}
/* Resolve a human-readable name for object #idx of the given kind:
 * 'm' method fullname, 'c' class name, 'f' field name. NULL otherwise. */
static char *getname(RBinFile *arch, int type, int idx) {
	struct r_bin_dex_obj_t *dex = arch->o->bin_obj;
	if (type == 'm') {
		return dex_method_fullname (dex, idx);
	}
	if (type == 'c') {
		return dex_class_name_byid (dex, idx);
	}
	if (type == 'f') {
		return dex_field_name (dex, idx);
	}
	return NULL;
}
/* Synthesize four pseudo-sections from the parsed method layout:
 * "header" (the dex_header), "constpool" (header end .. code_from),
 * "code" (code_from .. code_to, executable) and "data" (the rest).
 * Returns NULL when no method with a usable address was found. */
static RList *sections(RBinFile *arch) {
	struct r_bin_dex_obj_t *bin = arch->o->bin_obj;
	RList *ml = methods (arch);
	RBinSection *ptr = NULL;
	int ns, fsymsz = 0;
	RList *ret = NULL;
	RListIter *iter;
	RBinSymbol *m;
	int fsym = 0;
	// fsym = lowest symbol address, fsymsz = highest in-file symbol end
	// NOTE(review): fsym/ns are int while paddr is ut64 — possible
	// truncation on very large offsets; confirm before changing.
	r_list_foreach (ml, iter, m) {
		if (!fsym || m->paddr < fsym) {
			fsym = m->paddr;
		}
		ns = m->paddr + m->size;
		if (ns > arch->buf->length) {
			continue;
		}
		if (ns > fsymsz) {
			fsymsz = ns;
		}
	}
	if (!fsym) {
		return NULL;
	}
	if (!(ret = r_list_new ())) {
		return NULL;
	}
	ret->free = free;
	if ((ptr = R_NEW0 (RBinSection))) {
		strcpy (ptr->name, "header");
		ptr->size = ptr->vsize = sizeof (struct dex_header_t);
		ptr->paddr= ptr->vaddr = 0;
		ptr->srwx = R_BIN_SCN_READABLE | R_BIN_SCN_MAP;
		ptr->add = true;
		r_list_append (ret, ptr);
	}
	if ((ptr = R_NEW0 (RBinSection))) {
		strcpy (ptr->name, "constpool");
		//ptr->size = ptr->vsize = fsym;
		ptr->paddr= ptr->vaddr = sizeof (struct dex_header_t);
		ptr->size = bin->code_from - ptr->vaddr; // fix size
		ptr->srwx = R_BIN_SCN_READABLE | R_BIN_SCN_MAP;
		ptr->add = true;
		r_list_append (ret, ptr);
	}
	if ((ptr = R_NEW0 (RBinSection))) {
		strcpy (ptr->name, "code");
		ptr->vaddr = ptr->paddr = bin->code_from; //ptr->vaddr = fsym;
		ptr->size = bin->code_to - ptr->paddr;
		ptr->srwx = R_BIN_SCN_READABLE | R_BIN_SCN_EXECUTABLE | R_BIN_SCN_MAP;
		ptr->add = true;
		r_list_append (ret, ptr);
	}
	if ((ptr = R_NEW0 (RBinSection))) {
		//ut64 sz = arch ? r_buf_size (arch->buf): 0;
		strcpy (ptr->name, "data");
		ptr->paddr = ptr->vaddr = fsymsz+fsym;
		// fall back to code_to when the computed start is past the file
		if (ptr->vaddr > arch->buf->length) {
			ptr->paddr = ptr->vaddr = bin->code_to;
			ptr->size = ptr->vsize = arch->buf->length - ptr->vaddr;
		} else {
			ptr->size = ptr->vsize = arch->buf->length - ptr->vaddr;
			// hacky workaround
			//dprintf ("Hack\n");
			//ptr->size = ptr->vsize = 1024;
		}
		ptr->srwx = R_BIN_SCN_READABLE | R_BIN_SCN_MAP; //|2;
		ptr->add = true;
		r_list_append (ret, ptr);
	}
	return ret;
}
/* Print the raw DEX header fields, then re-run dex_loadcode with the
 * global dexdump flag enabled so the class/method dump is emitted. */
static void header(RBinFile *arch) {
	struct r_bin_dex_obj_t *bin = arch->o->bin_obj;
	struct r_bin_t *rbin = arch->rbin;
	rbin->cb_printf ("DEX file header:\n");
	rbin->cb_printf ("magic : 'dex\\n035\\0'\n");
	rbin->cb_printf ("checksum : %x\n", bin->header.checksum);
	rbin->cb_printf ("signature : %02x%02x...%02x%02x\n", bin->header.signature[0], bin->header.signature[1], bin->header.signature[18], bin->header.signature[19]);
	rbin->cb_printf ("file_size : %d\n", bin->header.size);
	rbin->cb_printf ("header_size : %d\n", bin->header.header_size);
	rbin->cb_printf ("link_size : %d\n", bin->header.linksection_size);
	rbin->cb_printf ("link_off : %d (0x%06x)\n", bin->header.linksection_offset, bin->header.linksection_offset);
	rbin->cb_printf ("string_ids_size : %d\n", bin->header.strings_size);
	rbin->cb_printf ("string_ids_off : %d (0x%06x)\n", bin->header.strings_offset, bin->header.strings_offset);
	rbin->cb_printf ("type_ids_size : %d\n", bin->header.types_size);
	rbin->cb_printf ("type_ids_off : %d (0x%06x)\n", bin->header.types_offset, bin->header.types_offset);
	rbin->cb_printf ("proto_ids_size : %d\n", bin->header.prototypes_size);
	rbin->cb_printf ("proto_ids_off : %d (0x%06x)\n", bin->header.prototypes_offset, bin->header.prototypes_offset);
	rbin->cb_printf ("field_ids_size : %d\n", bin->header.fields_size);
	rbin->cb_printf ("field_ids_off : %d (0x%06x)\n", bin->header.fields_offset, bin->header.fields_offset);
	rbin->cb_printf ("method_ids_size : %d\n", bin->header.method_size);
	rbin->cb_printf ("method_ids_off : %d (0x%06x)\n", bin->header.method_offset, bin->header.method_offset);
	rbin->cb_printf ("class_defs_size : %d\n", bin->header.class_size);
	rbin->cb_printf ("class_defs_off : %d (0x%06x)\n", bin->header.class_offset, bin->header.class_offset);
	rbin->cb_printf ("data_size : %d\n", bin->header.data_size);
	rbin->cb_printf ("data_off : %d (0x%06x)\n\n", bin->header.data_offset, bin->header.data_offset);
	// TODO: print information stored in the RBIN not this ugly fix
	// NOTE(review): NULLing methods_list forces dex_loadcode to rebuild
	// everything, but the previous methods/imports/classes lists are not
	// freed first — looks like a deliberate-but-leaky hack; confirm.
	dexdump = true;
	bin->methods_list = NULL;
	dex_loadcode (arch, bin);
	dexdump = false;
}
/* Compute the total file size from the DEX header: data_off (byte 108)
 * plus data_size (byte 104). Returns 0 when either read fails. */
static ut64 size(RBinFile *arch) {
	ut8 u32s[sizeof (ut32)] = {0};
	// data_off lives at byte offset 108 of the dex_header
	if (r_buf_read_at (arch->buf, 108, u32s, 4) != 4) {
		return 0;
	}
	ut32 off = r_read_le32 (u32s);
	// data_size lives at byte offset 104
	if (r_buf_read_at (arch->buf, 104, u32s, 4) != 4) {
		return 0;
	}
	ut32 len = r_read_le32 (u32s);
	return off + len;
}
/* Plugin descriptor: wires the DEX callbacks defined above into RBin. */
RBinPlugin r_bin_plugin_dex = {
	.name = "dex",
	.desc = "dex format bin plugin",
	.license = "LGPL3",
	.get_sdb = &get_sdb,
	.load = &load,
	.load_bytes = &load_bytes,
	.check = &check,
	.check_bytes = &check_bytes,
	.baddr = &baddr,
	.entries = entries,
	.classes = classes,
	.sections = sections,
	.symbols = methods, // symbol listing reuses the methods() accessor
	.imports = imports,
	.strings = strings,
	.info = &info,
	.header = &header,
	.size = &size,
	.get_offset = &getoffset,
	.get_name = &getname,
	.dbginfo = &r_bin_dbginfo_dex,
};
#ifndef CORELIB
/* Registration record used when the plugin is built as a shared object. */
RLibStruct radare_plugin = {
	.type = R_LIB_TYPE_BIN,
	.data = &r_bin_plugin_dex,
	.version = R2_VERSION
};
#endif
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3185_0 |
crossvul-cpp_data_bad_528_4 | /* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
#include "vim.h"
#ifdef AMIGA
# include <time.h> /* for time() */
#endif
/*
* Vim originated from Stevie version 3.6 (Fish disk 217) by GRWalter (Fred)
* It has been changed beyond recognition since then.
*
* Differences between version 7.4 and 8.x can be found with ":help version8".
* Differences between version 6.4 and 7.x can be found with ":help version7".
* Differences between version 5.8 and 6.x can be found with ":help version6".
* Differences between version 4.x and 5.x can be found with ":help version5".
* Differences between version 3.0 and 4.x can be found with ":help version4".
* All the remarks about older versions have been removed, they are not very
* interesting.
*/
#include "version.h"
/* Short version string (e.g. "8.1"), exported for use elsewhere. */
char *Version = VIM_VERSION_SHORT;
/* Medium version string used by this file's version output. */
static char *mediumVersion = VIM_VERSION_MEDIUM;
#if defined(HAVE_DATE_TIME) || defined(PROTO)
# if (defined(VMS) && defined(VAXC)) || defined(PROTO)
/* VAX C variant: the buffer size is derived from the contributing
 * literals (sizeof includes each NUL, plus 3 bytes of slack), so the
 * strcpy/strcat sequence below cannot overflow it. */
char longVersion[sizeof(VIM_VERSION_LONG_DATE) + sizeof(__DATE__)
	+ sizeof(__TIME__) + 3];
    void
init_longVersion(void)
{
    /*
     * Construct the long version string. Necessary because
     * VAX C can't catenate strings in the preprocessor.
     */
    strcpy(longVersion, VIM_VERSION_LONG_DATE);
    strcat(longVersion, __DATE__);
    strcat(longVersion, " ");
    strcat(longVersion, __TIME__);
    strcat(longVersion, ")");
}
# else
/* Build longVersion at runtime so the compile date/time is embedded.
 * len counts the three "%s" of msg (6 bytes) that get replaced by the
 * operands, so the allocation has room for the trailing NUL. */
    void
init_longVersion(void)
{
    char *date_time = __DATE__ " " __TIME__;
    char *msg = _("%s (%s, compiled %s)");
    size_t len = strlen(msg)
	+ strlen(VIM_VERSION_LONG_ONLY)
	+ strlen(VIM_VERSION_DATE_ONLY)
	+ strlen(date_time);
    longVersion = (char *)alloc((unsigned)len);
    if (longVersion == NULL)
	longVersion = VIM_VERSION_LONG;   /* fall back to the static string */
    else
	vim_snprintf(longVersion, len, msg,
	    VIM_VERSION_LONG_ONLY, VIM_VERSION_DATE_ONLY, date_time);
}
# endif
#else
char *longVersion = VIM_VERSION_LONG;
    void
init_longVersion(void)
{
    // nothing to do
}
#endif
static char *(features[]) =
{
#ifdef HAVE_ACL
"+acl",
#else
"-acl",
#endif
#ifdef AMIGA /* only for Amiga systems */
# ifdef FEAT_ARP
"+ARP",
# else
"-ARP",
# endif
#endif
#ifdef FEAT_ARABIC
"+arabic",
#else
"-arabic",
#endif
"+autocmd",
#ifdef FEAT_AUTOCHDIR
"+autochdir",
#else
"-autochdir",
#endif
#ifdef FEAT_AUTOSERVERNAME
"+autoservername",
#else
"-autoservername",
#endif
#ifdef FEAT_BEVAL_GUI
"+balloon_eval",
#else
"-balloon_eval",
#endif
#ifdef FEAT_BEVAL_TERM
"+balloon_eval_term",
#else
"-balloon_eval_term",
#endif
#ifdef FEAT_BROWSE
"+browse",
#else
"-browse",
#endif
#ifdef NO_BUILTIN_TCAPS
"-builtin_terms",
#endif
#ifdef SOME_BUILTIN_TCAPS
"+builtin_terms",
#endif
#ifdef ALL_BUILTIN_TCAPS
"++builtin_terms",
#endif
#ifdef FEAT_BYTEOFF
"+byte_offset",
#else
"-byte_offset",
#endif
#ifdef FEAT_JOB_CHANNEL
"+channel",
#else
"-channel",
#endif
#ifdef FEAT_CINDENT
"+cindent",
#else
"-cindent",
#endif
#ifdef FEAT_CLIENTSERVER
"+clientserver",
#else
"-clientserver",
#endif
#ifdef FEAT_CLIPBOARD
"+clipboard",
#else
"-clipboard",
#endif
#ifdef FEAT_CMDL_COMPL
"+cmdline_compl",
#else
"-cmdline_compl",
#endif
#ifdef FEAT_CMDHIST
"+cmdline_hist",
#else
"-cmdline_hist",
#endif
#ifdef FEAT_CMDL_INFO
"+cmdline_info",
#else
"-cmdline_info",
#endif
#ifdef FEAT_COMMENTS
"+comments",
#else
"-comments",
#endif
#ifdef FEAT_CONCEAL
"+conceal",
#else
"-conceal",
#endif
#ifdef FEAT_CRYPT
"+cryptv",
#else
"-cryptv",
#endif
#ifdef FEAT_CSCOPE
"+cscope",
#else
"-cscope",
#endif
"+cursorbind",
#ifdef CURSOR_SHAPE
"+cursorshape",
#else
"-cursorshape",
#endif
#if defined(FEAT_CON_DIALOG) && defined(FEAT_GUI_DIALOG)
"+dialog_con_gui",
#else
# if defined(FEAT_CON_DIALOG)
"+dialog_con",
# else
# if defined(FEAT_GUI_DIALOG)
"+dialog_gui",
# else
"-dialog",
# endif
# endif
#endif
#ifdef FEAT_DIFF
"+diff",
#else
"-diff",
#endif
#ifdef FEAT_DIGRAPHS
"+digraphs",
#else
"-digraphs",
#endif
#ifdef FEAT_GUI_W32
# ifdef FEAT_DIRECTX
"+directx",
# else
"-directx",
# endif
#endif
#ifdef FEAT_DND
"+dnd",
#else
"-dnd",
#endif
#ifdef EBCDIC
"+ebcdic",
#else
"-ebcdic",
#endif
#ifdef FEAT_EMACS_TAGS
"+emacs_tags",
#else
"-emacs_tags",
#endif
#ifdef FEAT_EVAL
"+eval",
#else
"-eval",
#endif
"+ex_extra",
#ifdef FEAT_SEARCH_EXTRA
"+extra_search",
#else
"-extra_search",
#endif
#ifdef FEAT_FKMAP
"+farsi",
#else
"-farsi",
#endif
#ifdef FEAT_SEARCHPATH
"+file_in_path",
#else
"-file_in_path",
#endif
#ifdef FEAT_FIND_ID
"+find_in_path",
#else
"-find_in_path",
#endif
#ifdef FEAT_FLOAT
"+float",
#else
"-float",
#endif
#ifdef FEAT_FOLDING
"+folding",
#else
"-folding",
#endif
#ifdef FEAT_FOOTER
"+footer",
#else
"-footer",
#endif
/* only interesting on Unix systems */
#if !defined(USE_SYSTEM) && defined(UNIX)
"+fork()",
#endif
#ifdef FEAT_GETTEXT
# ifdef DYNAMIC_GETTEXT
"+gettext/dyn",
# else
"+gettext",
# endif
#else
"-gettext",
#endif
#ifdef FEAT_HANGULIN
"+hangul_input",
#else
"-hangul_input",
#endif
#if (defined(HAVE_ICONV_H) && defined(USE_ICONV)) || defined(DYNAMIC_ICONV)
# ifdef DYNAMIC_ICONV
"+iconv/dyn",
# else
"+iconv",
# endif
#else
"-iconv",
#endif
#ifdef FEAT_INS_EXPAND
"+insert_expand",
#else
"-insert_expand",
#endif
#ifdef FEAT_JOB_CHANNEL
"+job",
#else
"-job",
#endif
#ifdef FEAT_JUMPLIST
"+jumplist",
#else
"-jumplist",
#endif
#ifdef FEAT_KEYMAP
"+keymap",
#else
"-keymap",
#endif
#ifdef FEAT_EVAL
"+lambda",
#else
"-lambda",
#endif
#ifdef FEAT_LANGMAP
"+langmap",
#else
"-langmap",
#endif
#ifdef FEAT_LIBCALL
"+libcall",
#else
"-libcall",
#endif
#ifdef FEAT_LINEBREAK
"+linebreak",
#else
"-linebreak",
#endif
#ifdef FEAT_LISP
"+lispindent",
#else
"-lispindent",
#endif
"+listcmds",
#ifdef FEAT_LOCALMAP
"+localmap",
#else
"-localmap",
#endif
#ifdef FEAT_LUA
# ifdef DYNAMIC_LUA
"+lua/dyn",
# else
"+lua",
# endif
#else
"-lua",
#endif
#ifdef FEAT_MENU
"+menu",
#else
"-menu",
#endif
#ifdef FEAT_SESSION
"+mksession",
#else
"-mksession",
#endif
#ifdef FEAT_MODIFY_FNAME
"+modify_fname",
#else
"-modify_fname",
#endif
#ifdef FEAT_MOUSE
"+mouse",
# ifdef FEAT_MOUSESHAPE
"+mouseshape",
# else
"-mouseshape",
# endif
# else
"-mouse",
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_MOUSE_DEC
"+mouse_dec",
# else
"-mouse_dec",
# endif
# ifdef FEAT_MOUSE_GPM
"+mouse_gpm",
# else
"-mouse_gpm",
# endif
# ifdef FEAT_MOUSE_JSB
"+mouse_jsbterm",
# else
"-mouse_jsbterm",
# endif
# ifdef FEAT_MOUSE_NET
"+mouse_netterm",
# else
"-mouse_netterm",
# endif
#endif
#ifdef __QNX__
# ifdef FEAT_MOUSE_PTERM
"+mouse_pterm",
# else
"-mouse_pterm",
# endif
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_MOUSE_SGR
"+mouse_sgr",
# else
"-mouse_sgr",
# endif
# ifdef FEAT_SYSMOUSE
"+mouse_sysmouse",
# else
"-mouse_sysmouse",
# endif
# ifdef FEAT_MOUSE_URXVT
"+mouse_urxvt",
# else
"-mouse_urxvt",
# endif
# ifdef FEAT_MOUSE_XTERM
"+mouse_xterm",
# else
"-mouse_xterm",
# endif
#endif
#ifdef FEAT_MBYTE_IME
# ifdef DYNAMIC_IME
"+multi_byte_ime/dyn",
# else
"+multi_byte_ime",
# endif
#else
# ifdef FEAT_MBYTE
"+multi_byte",
# else
"-multi_byte",
# endif
#endif
#ifdef FEAT_MULTI_LANG
"+multi_lang",
#else
"-multi_lang",
#endif
#ifdef FEAT_MZSCHEME
# ifdef DYNAMIC_MZSCHEME
"+mzscheme/dyn",
# else
"+mzscheme",
# endif
#else
"-mzscheme",
#endif
#ifdef FEAT_NETBEANS_INTG
"+netbeans_intg",
#else
"-netbeans_intg",
#endif
#ifdef FEAT_NUM64
"+num64",
#else
"-num64",
#endif
#ifdef FEAT_GUI_W32
# ifdef FEAT_OLE
"+ole",
# else
"-ole",
# endif
#endif
#ifdef FEAT_EVAL
"+packages",
#else
"-packages",
#endif
#ifdef FEAT_PATH_EXTRA
"+path_extra",
#else
"-path_extra",
#endif
#ifdef FEAT_PERL
# ifdef DYNAMIC_PERL
"+perl/dyn",
# else
"+perl",
# endif
#else
"-perl",
#endif
#ifdef FEAT_PERSISTENT_UNDO
"+persistent_undo",
#else
"-persistent_undo",
#endif
#ifdef FEAT_PRINTER
# ifdef FEAT_POSTSCRIPT
"+postscript",
# else
"-postscript",
# endif
"+printer",
#else
"-printer",
#endif
#ifdef FEAT_PROFILE
"+profile",
#else
"-profile",
#endif
#ifdef FEAT_PYTHON
# ifdef DYNAMIC_PYTHON
"+python/dyn",
# else
"+python",
# endif
#else
"-python",
#endif
#ifdef FEAT_PYTHON3
# ifdef DYNAMIC_PYTHON3
"+python3/dyn",
# else
"+python3",
# endif
#else
"-python3",
#endif
#ifdef FEAT_QUICKFIX
"+quickfix",
#else
"-quickfix",
#endif
#ifdef FEAT_RELTIME
"+reltime",
#else
"-reltime",
#endif
#ifdef FEAT_RIGHTLEFT
"+rightleft",
#else
"-rightleft",
#endif
#ifdef FEAT_RUBY
# ifdef DYNAMIC_RUBY
"+ruby/dyn",
# else
"+ruby",
# endif
#else
"-ruby",
#endif
"+scrollbind",
#ifdef FEAT_SIGNS
"+signs",
#else
"-signs",
#endif
#ifdef FEAT_SMARTINDENT
"+smartindent",
#else
"-smartindent",
#endif
#ifdef STARTUPTIME
"+startuptime",
#else
"-startuptime",
#endif
#ifdef FEAT_STL_OPT
"+statusline",
#else
"-statusline",
#endif
#ifdef FEAT_SUN_WORKSHOP
"+sun_workshop",
#else
"-sun_workshop",
#endif
#ifdef FEAT_SYN_HL
"+syntax",
#else
"-syntax",
#endif
/* only interesting on Unix systems */
#if defined(USE_SYSTEM) && defined(UNIX)
"+system()",
#endif
#ifdef FEAT_TAG_BINS
"+tag_binary",
#else
"-tag_binary",
#endif
#ifdef FEAT_TAG_OLDSTATIC
"+tag_old_static",
#else
"-tag_old_static",
#endif
#ifdef FEAT_TAG_ANYWHITE
"+tag_any_white",
#else
"-tag_any_white",
#endif
#ifdef FEAT_TCL
# ifdef DYNAMIC_TCL
"+tcl/dyn",
# else
"+tcl",
# endif
#else
"-tcl",
#endif
#ifdef FEAT_TERMGUICOLORS
"+termguicolors",
#else
"-termguicolors",
#endif
#ifdef FEAT_TERMINAL
"+terminal",
#else
"-terminal",
#endif
#if defined(UNIX)
/* only Unix can have terminfo instead of termcap */
# ifdef TERMINFO
"+terminfo",
# else
"-terminfo",
# endif
#endif
#ifdef FEAT_TERMRESPONSE
"+termresponse",
#else
"-termresponse",
#endif
#ifdef FEAT_TEXTOBJ
"+textobjects",
#else
"-textobjects",
#endif
#ifdef FEAT_TEXT_PROP
"+textprop",
#else
"-textprop",
#endif
#if !defined(UNIX)
/* unix always includes termcap support */
# ifdef HAVE_TGETENT
"+tgetent",
# else
"-tgetent",
# endif
#endif
#ifdef FEAT_TIMERS
"+timers",
#else
"-timers",
#endif
#ifdef FEAT_TITLE
"+title",
#else
"-title",
#endif
#ifdef FEAT_TOOLBAR
"+toolbar",
#else
"-toolbar",
#endif
#ifdef FEAT_USR_CMDS
"+user_commands",
#else
"-user_commands",
#endif
#ifdef FEAT_VARTABS
"+vartabs",
#else
"-vartabs",
#endif
"+vertsplit",
#ifdef FEAT_VIRTUALEDIT
"+virtualedit",
#else
"-virtualedit",
#endif
"+visual",
#ifdef FEAT_VISUALEXTRA
"+visualextra",
#else
"-visualextra",
#endif
#ifdef FEAT_VIMINFO
"+viminfo",
#else
"-viminfo",
#endif
"+vreplace",
#ifdef WIN3264
# ifdef FEAT_VTP
"+vtp",
# else
"-vtp",
# endif
#endif
#ifdef FEAT_WILDIGN
"+wildignore",
#else
"-wildignore",
#endif
#ifdef FEAT_WILDMENU
"+wildmenu",
#else
"-wildmenu",
#endif
"+windows",
#ifdef FEAT_WRITEBACKUP
"+writebackup",
#else
"-writebackup",
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_X11
"+X11",
# else
"-X11",
# endif
#endif
#ifdef FEAT_XFONTSET
"+xfontset",
#else
"-xfontset",
#endif
#ifdef FEAT_XIM
"+xim",
#else
"-xim",
#endif
#ifdef WIN3264
# ifdef FEAT_XPM_W32
"+xpm_w32",
# else
"-xpm_w32",
# endif
#else
# ifdef HAVE_XPM
"+xpm",
# else
"-xpm",
# endif
#endif
#if defined(UNIX) || defined(VMS)
# ifdef USE_XSMP_INTERACT
"+xsmp_interact",
# else
# ifdef USE_XSMP
"+xsmp",
# else
"-xsmp",
# endif
# endif
# ifdef FEAT_XCLIPBOARD
"+xterm_clipboard",
# else
"-xterm_clipboard",
# endif
#endif
#ifdef FEAT_XTERM_SAVE
"+xterm_save",
#else
"-xterm_save",
#endif
NULL
};
/*
 * Patch numbers included in this build, listed highest first.
 * The list is terminated by a 0 entry; new patch numbers are added at the
 * front.  Used by highest_patch(), has_patch() and list_version().
 */
static int included_patches[] =
{
    632, 631, 630, 629, 628, 627, 626, 625, 624, 623,
    622, 621, 620, 619, 618, 617, 616, 615, 614, 613,
    612, 611, 610, 609, 608, 607, 606, 605, 604, 603,
    602, 601, 600, 599, 598, 597, 596, 595, 594, 593,
    592, 591, 590, 589, 588, 587, 586, 585, 584, 583,
    582, 581, 580, 579, 578, 577, 576, 575, 574, 573,
    572, 571, 570, 569, 568, 567, 566, 565, 564, 563,
    562, 561, 560, 559, 558, 557, 556, 555, 554, 553,
    552, 551, 550, 549, 548, 547, 546, 545, 544, 543,
    542, 541, 540, 539, 538, 537, 536, 535, 534, 533,
    532, 531, 530, 529, 528, 527, 526, 525, 524, 523,
    522, 521, 520, 519, 518, 517, 516, 515, 514, 513,
    512, 511, 510, 509, 508, 507, 506, 505, 504, 503,
    502, 501, 500, 499, 498, 497, 496, 495, 494, 493,
    492, 491, 490, 489, 488, 487, 486, 485, 484, 483,
    482, 481, 480, 479, 478, 477, 476, 475, 474, 473,
    472, 471, 470, 469, 468, 467, 466, 465, 464, 463,
    462, 461, 460, 459, 458, 457, 456, 455, 454, 453,
    452, 451, 450, 449, 448, 447, 446, 445, 444, 443,
    442, 441, 440, 439, 438, 437, 436, 435, 434, 433,
    432, 431, 430, 429, 428, 427, 426, 425, 424, 423,
    422, 421, 420, 419, 418, 417, 416, 415, 414, 413,
    412, 411, 410, 409, 408, 407, 406, 405, 404, 403,
    402, 401, 400, 399, 398, 397, 396, 395, 394, 393,
    392, 391, 390, 389, 388, 387, 386, 385, 384, 383,
    382, 381, 380, 379, 378, 377, 376, 375, 374, 373,
    372, 371, 370, 369, 368, 367, 366, 365, 364, 363,
    362, 361, 360, 359, 358, 357, 356, 355, 354, 353,
    352, 351, 350, 349, 348, 347, 346, 345, 344, 343,
    342, 341, 340, 339, 338, 337, 336, 335, 334, 333,
    332, 331, 330, 329, 328, 327, 326, 325, 324, 323,
    322, 321, 320, 319, 318, 317, 316, 315, 314, 313,
    312, 311, 310, 309, 308, 307, 306, 305, 304, 303,
    302, 301, 300, 299, 298, 297, 296, 295, 294, 293,
    292, 291, 290, 289, 288, 287, 286, 285, 284, 283,
    282, 281, 280, 279, 278, 277, 276, 275, 274, 273,
    272, 271, 270, 269, 268, 267, 266, 265, 264, 263,
    262, 261, 260, 259, 258, 257, 256, 255, 254, 253,
    252, 251, 250, 249, 248, 247, 246, 245, 244, 243,
    242, 241, 240, 239, 238, 237, 236, 235, 234, 233,
    232, 231, 230, 229, 228, 227, 226, 225, 224, 223,
    222, 221, 220, 219, 218, 217, 216, 215, 214, 213,
    212, 211, 210, 209, 208, 207, 206, 205, 204, 203,
    202, 201, 200, 199, 198, 197, 196, 195, 194, 193,
    192, 191, 190, 189, 188, 187, 186, 185, 184, 183,
    182, 181, 180, 179, 178, 177, 176, 175, 174, 173,
    172, 171, 170, 169, 168, 167, 166, 165, 164, 163,
    162, 161, 160, 159, 158, 157, 156, 155, 154, 153,
    152, 151, 150, 149, 148, 147, 146, 145, 144, 143,
    142, 141, 140, 139, 138, 137, 136, 135, 134, 133,
    132, 131, 130, 129, 128, 127, 126, 125, 124, 123,
    122, 121, 120, 119, 118, 117, 116, 115, 114, 113,
    112, 111, 110, 109, 108, 107, 106, 105, 104, 103,
    102, 101, 100, 99, 98, 97, 96, 95, 94, 93,
    92, 91, 90, 89, 88, 87, 86, 85, 84, 83,
    82, 81, 80, 79, 78, 77, 76, 75, 74, 73,
    72, 71, 70, 69, 68, 67, 66, 65, 64, 63,
    62, 61, 60, 59, 58, 57, 56, 55, 54, 53,
    52, 51, 50, 49, 48, 47, 46, 45, 44, 43,
    42, 41, 40, 39, 38, 37, 36, 35, 34, 33,
    32, 31, 30, 29, 28, 27, 26, 25, 24, 23,
    22, 21, 20, 19, 18, 17, 16, 15, 14, 13,
    12, 11, 10, 9, 8, 7, 6, 5, 4, 3,
    2, 1, 0
};
/*
 * Short descriptions for features added with a patch.
 * Keep each entry short, e.g.: "relative numbers", "persistent undo".
 * Separate the entries with a comment marker; official Vim patches use a
 * diff context of one line only (create by hand or with "diff -C2").
 * The list must stay NULL-terminated; add descriptions before the NULL.
 */
static char *(extra_patches[]) =
{
    NULL
};
/*
 * Return the highest patch number that is included in this build.
 */
    int
highest_patch(void)
{
    int		*p;
    int		h = 0;

    /* "included_patches" is terminated by a zero entry; remember the
     * largest value seen while scanning. */
    for (p = included_patches; *p != 0; ++p)
	if (*p > h)
	    h = *p;
    return h;
}
#if defined(FEAT_EVAL) || defined(PROTO)
/*
 * Return TRUE if patch "n" has been included in this build.
 */
    int
has_patch(int n)
{
    int		*p;

    /* Linear scan of the zero-terminated patch list. */
    for (p = included_patches; *p != 0; ++p)
	if (*p == n)
	    return TRUE;
    return FALSE;
}
#endif
/*
 * ":version" command: print the version information.
 */
    void
ex_version(exarg_T *eap)
{
    /* A ":version 9.99" command (with an argument) is silently ignored. */
    if (*eap->arg != NUL)
	return;

    msg_putchar('\n');
    list_version();
}
/*
 * Output one string of the version message.  Starts a new line first when
 * the text would otherwise wrap, unless it is too long to fit on a screen
 * line anyway.  When "wrap" is TRUE the string is enclosed in [].
 */
    static void
version_msg_wrap(char_u *s, int wrap)
{
    int		len = (int)vim_strsize(s) + (wrap ? 2 : 0);

    /* Break the line early when the text would wrap mid-string, but not
     * when it is wider than the screen or begins with a newline itself. */
    if (!got_int
	    && *s != '\n'
	    && len < (int)Columns
	    && msg_col + len >= (int)Columns)
	msg_putchar('\n');

    if (got_int)
	return;
    if (wrap)
	MSG_PUTS("[");
    MSG_PUTS(s);
    if (wrap)
	MSG_PUTS("]");
}
/*
 * Plain variant of version_msg_wrap(): output "s" without brackets.
 */
    static void
version_msg(char *s)
{
    version_msg_wrap((char_u *)s, FALSE);
}
/*
 * List all compiled-in features aligned in columns, dictionary style.
 */
    static void
list_features(void)
{
    /* "features" is NULL-terminated (size -1); no entry is highlighted. */
    list_in_columns((char_u **)features, -1, -1);
}
/*
 * List string items nicely aligned in columns.
 * When "size" is < 0 then the last entry is marked with NULL.
 * The entry with index "current" is enclosed in [].
 * Output goes through the msg functions, so it respects 'more' and
 * interrupts (got_int).
 */
void
list_in_columns(char_u **items, int size, int current)
{
int i;
int ncol;
int nrow;
int item_count = 0;
int width = 0;
/* Find the length of the longest item, use that + 1 as the column
 * width.  The "+ 2" accounts for the [] drawn around the current entry. */
for (i = 0; size < 0 ? items[i] != NULL : i < size; ++i)
{
int l = (int)vim_strsize(items[i]) + (i == current ? 2 : 0);
if (l > width)
width = l;
++item_count;
}
width += 1;
if (Columns < width)
{
/* Not enough screen columns - show one per line */
for (i = 0; i < item_count; ++i)
{
version_msg_wrap(items[i], i == current);
if (msg_col > 0)
msg_putchar('\n');
}
return;
}
/* The rightmost column doesn't need a separator.
 * Sacrifice it to fit in one more column if possible. */
ncol = (int) (Columns + 1) / width;
/* Round the row count up so every item gets a cell. */
nrow = item_count / ncol + (item_count % ncol ? 1 : 0);
/* i counts columns then rows. idx counts rows then columns. */
for (i = 0; !got_int && i < nrow * ncol; ++i)
{
int idx = (i / ncol) + (i % ncol) * nrow;
if (idx < item_count)
{
int last_col = (i + 1) % ncol == 0;
if (idx == current)
msg_putchar('[');
msg_puts(items[idx]);
if (idx == current)
msg_putchar(']');
if (last_col)
{
/* End of a screen row: wrap to the next line. */
if (msg_col > 0)
msg_putchar('\n');
}
else
{
/* Pad with spaces up to the next column boundary. */
while (msg_col % width)
msg_putchar(' ');
}
}
else
{
/* Cell past the last item: just finish the line. */
if (msg_col > 0)
msg_putchar('\n');
}
}
}
/*
 * Print the full version message: version string, platform, included
 * patches, compile-time configuration, feature list and rc-file locations.
 * Used by ":version" and "--version".
 */
void
list_version(void)
{
int i;
int first;
char *s = "";
/*
 * When adding features here, don't forget to update the list of
 * internal variables in eval.c!
 */
init_longVersion();
MSG(longVersion);
#ifdef WIN3264
# ifdef FEAT_GUI_W32
# ifdef _WIN64
MSG_PUTS(_("\nMS-Windows 64-bit GUI version"));
# else
MSG_PUTS(_("\nMS-Windows 32-bit GUI version"));
# endif
# ifdef FEAT_OLE
MSG_PUTS(_(" with OLE support"));
# endif
# else
# ifdef _WIN64
MSG_PUTS(_("\nMS-Windows 64-bit console version"));
# else
MSG_PUTS(_("\nMS-Windows 32-bit console version"));
# endif
# endif
#endif
#if defined(MACOS_X)
# if defined(MACOS_X_DARWIN)
MSG_PUTS(_("\nmacOS version"));
# else
MSG_PUTS(_("\nmacOS version w/o darwin feat."));
# endif
#endif
#ifdef VMS
MSG_PUTS(_("\nOpenVMS version"));
# ifdef HAVE_PATHDEF
if (*compiled_arch != NUL)
{
MSG_PUTS(" - ");
MSG_PUTS(compiled_arch);
}
# endif
#endif
/* Print the list of patch numbers if there is at least one. */
/* Print a range when patches are consecutive: "1-10, 12, 15-40, 42-45".
 * The list is scanned backwards (lowest number first) and "first" holds
 * the start of the current consecutive run; "s" is the separator, empty
 * before the first range. */
if (included_patches[0] != 0)
{
MSG_PUTS(_("\nIncluded patches: "));
first = -1;
/* find last one */
for (i = 0; included_patches[i] != 0; ++i)
;
while (--i >= 0)
{
if (first < 0)
first = included_patches[i];
if (i == 0 || included_patches[i - 1] != included_patches[i] + 1)
{
MSG_PUTS(s);
s = ", ";
msg_outnum((long)first);
if (first != included_patches[i])
{
MSG_PUTS("-");
msg_outnum((long)included_patches[i]);
}
first = -1;
}
}
}
/* Print the list of extra patch descriptions if there is at least one. */
if (extra_patches[0] != NULL)
{
MSG_PUTS(_("\nExtra patches: "));
s = "";
for (i = 0; extra_patches[i] != NULL; ++i)
{
MSG_PUTS(s);
s = ", ";
MSG_PUTS(extra_patches[i]);
}
}
#ifdef MODIFIED_BY
MSG_PUTS("\n");
MSG_PUTS(_("Modified by "));
MSG_PUTS(MODIFIED_BY);
#endif
#ifdef HAVE_PATHDEF
if (*compiled_user != NUL || *compiled_sys != NUL)
{
MSG_PUTS(_("\nCompiled "));
if (*compiled_user != NUL)
{
MSG_PUTS(_("by "));
MSG_PUTS(compiled_user);
}
if (*compiled_sys != NUL)
{
MSG_PUTS("@");
MSG_PUTS(compiled_sys);
}
}
#endif
/* Exactly one of Huge/Big/Normal/Small/Tiny is printed. */
#ifdef FEAT_HUGE
MSG_PUTS(_("\nHuge version "));
#else
# ifdef FEAT_BIG
MSG_PUTS(_("\nBig version "));
# else
# ifdef FEAT_NORMAL
MSG_PUTS(_("\nNormal version "));
# else
# ifdef FEAT_SMALL
MSG_PUTS(_("\nSmall version "));
# else
MSG_PUTS(_("\nTiny version "));
# endif
# endif
# endif
#endif
#ifndef FEAT_GUI
MSG_PUTS(_("without GUI."));
#else
# ifdef FEAT_GUI_GTK
# ifdef USE_GTK3
MSG_PUTS(_("with GTK3 GUI."));
# else
# ifdef FEAT_GUI_GNOME
MSG_PUTS(_("with GTK2-GNOME GUI."));
# else
MSG_PUTS(_("with GTK2 GUI."));
# endif
# endif
# else
# ifdef FEAT_GUI_MOTIF
MSG_PUTS(_("with X11-Motif GUI."));
# else
# ifdef FEAT_GUI_ATHENA
# ifdef FEAT_GUI_NEXTAW
MSG_PUTS(_("with X11-neXtaw GUI."));
# else
MSG_PUTS(_("with X11-Athena GUI."));
# endif
# else
# ifdef FEAT_GUI_PHOTON
MSG_PUTS(_("with Photon GUI."));
# else
# if defined(MSWIN)
MSG_PUTS(_("with GUI."));
# else
# if defined(TARGET_API_MAC_CARBON) && TARGET_API_MAC_CARBON
MSG_PUTS(_("with Carbon GUI."));
# else
# if defined(TARGET_API_MAC_OSX) && TARGET_API_MAC_OSX
MSG_PUTS(_("with Cocoa GUI."));
# else
# endif
# endif
# endif
# endif
# endif
# endif
# endif
#endif
version_msg(_(" Features included (+) or not (-):\n"));
list_features();
/* Where the various startup files are looked for. */
#ifdef SYS_VIMRC_FILE
version_msg(_(" system vimrc file: \""));
version_msg(SYS_VIMRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE
version_msg(_(" user vimrc file: \""));
version_msg(USR_VIMRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE2
version_msg(_(" 2nd user vimrc file: \""));
version_msg(USR_VIMRC_FILE2);
version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE3
version_msg(_(" 3rd user vimrc file: \""));
version_msg(USR_VIMRC_FILE3);
version_msg("\"\n");
#endif
#ifdef USR_EXRC_FILE
version_msg(_(" user exrc file: \""));
version_msg(USR_EXRC_FILE);
version_msg("\"\n");
#endif
#ifdef USR_EXRC_FILE2
version_msg(_(" 2nd user exrc file: \""));
version_msg(USR_EXRC_FILE2);
version_msg("\"\n");
#endif
#ifdef FEAT_GUI
# ifdef SYS_GVIMRC_FILE
version_msg(_(" system gvimrc file: \""));
version_msg(SYS_GVIMRC_FILE);
version_msg("\"\n");
# endif
version_msg(_(" user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE);
version_msg("\"\n");
# ifdef USR_GVIMRC_FILE2
version_msg(_("2nd user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE2);
version_msg("\"\n");
# endif
# ifdef USR_GVIMRC_FILE3
version_msg(_("3rd user gvimrc file: \""));
version_msg(USR_GVIMRC_FILE3);
version_msg("\"\n");
# endif
#endif
version_msg(_(" defaults file: \""));
version_msg(VIM_DEFAULTS_FILE);
version_msg("\"\n");
#ifdef FEAT_GUI
# ifdef SYS_MENU_FILE
version_msg(_(" system menu file: \""));
version_msg(SYS_MENU_FILE);
version_msg("\"\n");
# endif
#endif
#ifdef HAVE_PATHDEF
if (*default_vim_dir != NUL)
{
version_msg(_(" fall-back for $VIM: \""));
version_msg((char *)default_vim_dir);
version_msg("\"\n");
}
if (*default_vimruntime_dir != NUL)
{
version_msg(_(" f-b for $VIMRUNTIME: \""));
version_msg((char *)default_vimruntime_dir);
version_msg("\"\n");
}
version_msg(_("Compilation: "));
version_msg((char *)all_cflags);
version_msg("\n");
#ifdef VMS
if (*compiler_version != NUL)
{
version_msg(_("Compiler: "));
version_msg((char *)compiler_version);
version_msg("\n");
}
#endif
version_msg(_("Linking: "));
version_msg((char *)all_lflags);
#endif
#ifdef DEBUG
version_msg("\n");
version_msg(_(" DEBUG BUILD"));
#endif
}
static void do_intro_line(int row, char_u *mesg, int add_version, int attr);
/*
 * Show the intro message when not editing a file: buffer empty, no file
 * name, a single window, and the "I" flag not in 'shortmess'.
 */
    void
maybe_intro_message(void)
{
    if (!BUFEMPTY())
	return;
    if (curbuf->b_fname != NULL)
	return;
    if (firstwin->w_next != NULL)
	return;
    if (vim_strchr(p_shm, SHM_INTRO) != NULL)
	return;
    intro_message(FALSE);
}
/*
 * Give an introductory message about Vim.
 * Only used when starting Vim on an empty file, without a file name.
 * Or with the ":intro" command (for Sven :-).
 * The message is vertically centered and only shown when there is enough
 * room (unless ":intro" forces it).
 */
void
intro_message(
int colon) /* TRUE for ":intro" */
{
int i;
int row;
int blanklines;
int sponsor;
char *p;
/* The NULL entry separates the always-shown lines from the ones that are
 * only shown in Vi compatible mode.  lines[2] is the "version " line. */
static char *(lines[]) =
{
N_("VIM - Vi IMproved"),
"",
N_("version "),
N_("by Bram Moolenaar et al."),
#ifdef MODIFIED_BY
" ",
#endif
N_("Vim is open source and freely distributable"),
"",
N_("Help poor children in Uganda!"),
N_("type :help iccf<Enter> for information "),
"",
N_("type :q<Enter> to exit "),
N_("type :help<Enter> or <F1> for on-line help"),
N_("type :help version8<Enter> for version info"),
NULL,
"",
N_("Running in Vi compatible mode"),
N_("type :set nocp<Enter> for Vim defaults"),
N_("type :help cp-default<Enter> for info on this"),
};
#ifdef FEAT_GUI
/* GUI replacements, indexed in parallel with lines[]; NULL means use the
 * corresponding lines[] entry. */
static char *(gui_lines[]) =
{
NULL,
NULL,
NULL,
NULL,
#ifdef MODIFIED_BY
NULL,
#endif
NULL,
NULL,
NULL,
N_("menu Help->Orphans for information "),
NULL,
N_("Running modeless, typed text is inserted"),
N_("menu Edit->Global Settings->Toggle Insert Mode "),
N_(" for two modes "),
NULL,
NULL,
NULL,
N_("menu Edit->Global Settings->Toggle Vi Compatible"),
N_(" for Vim defaults "),
};
#endif
/* blanklines = screen height - # message lines */
blanklines = (int)Rows - ((sizeof(lines) / sizeof(char *)) - 1);
if (!p_cp)
blanklines += 4; /* add 4 for not showing "Vi compatible" message */
/* Don't overwrite a statusline. Depends on 'cmdheight'. */
if (p_ls > 1)
blanklines -= Rows - topframe->fr_height;
if (blanklines < 0)
blanklines = 0;
/* Show the sponsor and register message one out of four times, the Uganda
 * message two out of four times.  Derived from the clock, so it varies
 * between runs: -1 = sponsor, 1 = register, 0 = Uganda. */
sponsor = (int)time(NULL);
sponsor = ((sponsor & 2) == 0) - ((sponsor & 4) == 0);
/* start displaying the message lines after half of the blank lines */
row = blanklines / 2;
if ((row >= 2 && Columns >= 50) || colon)
{
for (i = 0; i < (int)(sizeof(lines) / sizeof(char *)); ++i)
{
p = lines[i];
#ifdef FEAT_GUI
if (p_im && gui.in_use && gui_lines[i] != NULL)
p = gui_lines[i];
#endif
if (p == NULL)
{
/* NULL marks the start of the Vi-compatible-only lines. */
if (!p_cp)
break;
continue;
}
if (sponsor != 0)
{
/* Substitute the sponsor/register variants for the Uganda lines. */
if (strstr(p, "children") != NULL)
p = sponsor < 0
? N_("Sponsor Vim development!")
: N_("Become a registered Vim user!");
else if (strstr(p, "iccf") != NULL)
p = sponsor < 0
? N_("type :help sponsor<Enter> for information ")
: N_("type :help register<Enter> for information ");
else if (strstr(p, "Orphans") != NULL)
p = N_("menu Help->Sponsor/Register for information ");
}
/* i == 2 is the "version " line: append the version number. */
if (*p != NUL)
do_intro_line(row, (char_u *)_(p), i == 2, 0);
++row;
}
}
/* Make the wait-return message appear just below the text. */
if (colon)
msg_row = row;
}
/*
 * Display one line of the intro message, centered on screen row "row".
 * "mesg" is the text; when "add_version" is TRUE the version number (with
 * highest patch) is appended.  Text between '<' and '>' is highlighted
 * with HLF_8; the rest uses "attr".
 */
static void
do_intro_line(
int row,
char_u *mesg,
int add_version,
int attr)
{
/* NOTE(review): vers[] is 20 bytes; assumes mediumVersion plus ".%d"
 * of the highest patch always fits -- TODO confirm. */
char_u vers[20];
int col;
char_u *p;
int l;
int clen;
#ifdef MODIFIED_BY
# define MODBY_LEN 150
char_u modby[MODBY_LEN];
/* A message starting with a space is the placeholder for the
 * "Modified by ..." line. */
if (*mesg == ' ')
{
vim_strncpy(modby, (char_u *)_("Modified by "), MODBY_LEN - 1);
l = (int)STRLEN(modby);
vim_strncpy(modby + l, (char_u *)MODIFIED_BY, MODBY_LEN - l - 1);
mesg = modby;
}
#endif
/* Center the message horizontally. */
col = vim_strsize(mesg);
if (add_version)
{
STRCPY(vers, mediumVersion);
if (highest_patch())
{
/* Check for 9.9x or 9.9xx, alpha/beta version */
if (isalpha((int)vers[3]))
{
int len = (isalpha((int)vers[4])) ? 5 : 4;
sprintf((char *)vers + len, ".%d%s", highest_patch(),
mediumVersion + len);
}
else
sprintf((char *)vers + 3, ".%d", highest_patch());
}
col += (int)STRLEN(vers);
}
col = (Columns - col) / 2;
if (col < 0)
col = 0;
/* Split up in parts to highlight <> items differently. */
for (p = mesg; *p != NUL; p += l)
{
clen = 0;
/* Advance "l" to the next '<' or past the next '>'; count display
 * cells in "clen" (multi-byte aware). */
for (l = 0; p[l] != NUL
&& (l == 0 || (p[l] != '<' && p[l - 1] != '>')); ++l)
{
#ifdef FEAT_MBYTE
if (has_mbyte)
{
clen += ptr2cells(p + l);
l += (*mb_ptr2len)(p + l) - 1;
}
else
#endif
clen += byte2cells(p[l]);
}
screen_puts_len(p, l, row, col, *p == '<' ? HL_ATTR(HLF_8) : attr);
col += clen;
}
/* Add the version number to the version line. */
if (add_version)
screen_puts(vers, row, col, 0);
}
/*
 * ":intro": clear screen, display intro screen and wait for return.
 */
void
ex_intro(exarg_T *eap UNUSED)
{
screenclear();
/* TRUE: forced by the ":intro" command, shown even when it would not
 * fit nicely. */
intro_message(TRUE);
wait_return(TRUE);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_528_4 |
crossvul-cpp_data_good_321_0 | /************************************************************
* Copyright (c) 1994 by Silicon Graphics Computer Systems, Inc.
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of Silicon Graphics not be
* used in advertising or publicity pertaining to distribution
* of the software without specific prior written permission.
* Silicon Graphics makes no representation about the suitability
* of this software for any purpose. It is provided "as is"
* without any express or implied warranty.
*
* SILICON GRAPHICS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON
* GRAPHICS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH
* THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
********************************************************/
#include "xkbcomp-priv.h"
#include "text.h"
#include "expr.h"
#include "include.h"
/* One "alias <x> = <y>;" statement: "alias" is the new name, "real" the
 * key name it stands for. */
typedef struct {
    enum merge_mode merge;
    xkb_atom_t alias;
    xkb_atom_t real;
} AliasInfo;

/* One "indicator N = \"name\";" statement. */
typedef struct {
    enum merge_mode merge;
    xkb_atom_t name;
} LedNameInfo;

/* Accumulated state while compiling an xkb_keycodes section. */
typedef struct {
    char *name;                     /* Section name (owned). */
    int errorCount;
    xkb_keycode_t min_key_code;     /* Lowest keycode seen so far. */
    xkb_keycode_t max_key_code;     /* Highest keycode seen so far. */
    darray(xkb_atom_t) key_names;   /* Indexed by keycode. */
    LedNameInfo led_names[XKB_MAX_LEDS];
    unsigned int num_led_names;     /* Highest used LED index + 1. */
    darray(AliasInfo) aliases;

    struct xkb_context *ctx;
} KeyNamesInfo;
/***====================================================================***/
/* Initialize "info" with the given merge mode, alias and real key name;
 * all other bytes are zeroed. */
static void
InitAliasInfo(AliasInfo *info, enum merge_mode merge,
              xkb_atom_t alias, xkb_atom_t real)
{
    *info = (AliasInfo) {
        .merge = merge,
        .alias = alias,
        .real = real,
    };
}
/* Look up an LED entry by name.  On success, return the entry and store
 * its index in *idx_out; return NULL when no LED has this name. */
static LedNameInfo *
FindLedByName(KeyNamesInfo *info, xkb_atom_t name,
              xkb_led_index_t *idx_out)
{
    xkb_led_index_t i;

    for (i = 0; i < info->num_led_names; i++) {
        if (info->led_names[i].name == name) {
            *idx_out = i;
            return &info->led_names[i];
        }
    }

    return NULL;
}
/*
 * Record an LED name at index "new_idx", resolving conflicts with any
 * existing entry of the same name or at the same index according to
 * "merge".  "same_file" only affects warning verbosity.
 */
static bool
AddLedName(KeyNamesInfo *info, enum merge_mode merge, bool same_file,
           LedNameInfo *new, xkb_led_index_t new_idx)
{
    xkb_led_index_t old_idx;
    LedNameInfo *old;
    const int verbosity = xkb_context_get_log_verbosity(info->ctx);
    const bool report = (same_file && verbosity > 0) || verbosity > 9;
    const bool replace = (merge == MERGE_REPLACE || merge == MERGE_OVERRIDE);

    /* LED with the same name already exists. */
    old = FindLedByName(info, new->name, &old_idx);
    if (old) {
        if (old_idx == new_idx) {
            log_warn(info->ctx,
                     "Multiple indicators named \"%s\"; "
                     "Identical definitions ignored\n",
                     xkb_atom_text(info->ctx, new->name));
            return true;
        }

        if (report) {
            xkb_led_index_t use = (replace ? new_idx + 1 : old_idx + 1);
            xkb_led_index_t ignore = (replace ? old_idx + 1 : new_idx + 1);
            log_warn(info->ctx,
                     "Multiple indicators named %s; Using %d, ignoring %d\n",
                     xkb_atom_text(info->ctx, new->name), use, ignore);
        }

        if (replace)
            *old = *new;

        return true;
    }

    /* NOTE(review): new_idx indexes led_names[XKB_MAX_LEDS] without a range
     * check here -- presumably callers validate idx < XKB_MAX_LEDS before
     * calling; verify at all call sites. */
    if (new_idx >= info->num_led_names)
        info->num_led_names = new_idx + 1;

    /* LED with the same index already exists. */
    old = &info->led_names[new_idx];
    if (old->name != XKB_ATOM_NONE) {
        if (report) {
            const xkb_atom_t use = (replace ? new->name : old->name);
            const xkb_atom_t ignore = (replace ? old->name : new->name);
            log_warn(info->ctx, "Multiple names for indicator %d; "
                     "Using %s, ignoring %s\n", new_idx + 1,
                     xkb_atom_text(info->ctx, use),
                     xkb_atom_text(info->ctx, ignore));
        }

        if (replace)
            *old = *new;

        return true;
    }

    *old = *new;
    return true;
}
/* Release the heap storage owned by "info" (section name, key name array,
 * alias array); does not free "info" itself. */
static void
ClearKeyNamesInfo(KeyNamesInfo *info)
{
    free(info->name);
    darray_free(info->key_names);
    darray_free(info->aliases);
}
/* Zero-initialize "info" and attach the context.  min_key_code starts at
 * XKB_KEYCODE_INVALID so the first real keycode always lowers it. */
static void
InitKeyNamesInfo(KeyNamesInfo *info, struct xkb_context *ctx)
{
    memset(info, 0, sizeof(*info));
    info->ctx = ctx;
    info->min_key_code = XKB_KEYCODE_INVALID;
/* Compile-time check: the sentinel must be above every valid keycode. */
#if XKB_KEYCODE_INVALID < XKB_KEYCODE_MAX
#error "Hey, you can't be changing stuff like that."
#endif
}
/* Return the keycode currently mapped to "name", or XKB_KEYCODE_INVALID
 * when no key has this name. */
static xkb_keycode_t
FindKeyByName(KeyNamesInfo *info, xkb_atom_t name)
{
    for (xkb_keycode_t kc = info->min_key_code;
         kc <= info->max_key_code; kc++) {
        if (darray_item(info->key_names, kc) == name)
            return kc;
    }

    return XKB_KEYCODE_INVALID;
}
/*
 * Record "name" for keycode "kc", resolving conflicts with an existing
 * name at the same keycode or the same name at another keycode according
 * to "merge".  Grows the key_names array and updates min/max_key_code.
 * "same_file"/"report" only affect warning verbosity.
 */
static bool
AddKeyName(KeyNamesInfo *info, xkb_keycode_t kc, xkb_atom_t name,
           enum merge_mode merge, bool same_file, bool report)
{
    xkb_atom_t old_name;
    xkb_keycode_t old_kc;
    const int verbosity = xkb_context_get_log_verbosity(info->ctx);

    report = report && ((same_file && verbosity > 0) || verbosity > 7);

    if (kc >= darray_size(info->key_names))
        darray_resize0(info->key_names, kc + 1);

    info->min_key_code = MIN(info->min_key_code, kc);
    info->max_key_code = MAX(info->max_key_code, kc);

    /* There's already a key with this keycode. */
    old_name = darray_item(info->key_names, kc);
    if (old_name != XKB_ATOM_NONE) {
        const char *lname = KeyNameText(info->ctx, old_name);
        const char *kname = KeyNameText(info->ctx, name);

        if (old_name == name) {
            if (report)
                log_warn(info->ctx,
                         "Multiple identical key name definitions; "
                         "Later occurrences of \"%s = %d\" ignored\n",
                         lname, kc);
            return true;
        }
        else if (merge == MERGE_AUGMENT) {
            if (report)
                log_warn(info->ctx,
                         "Multiple names for keycode %d; "
                         "Using %s, ignoring %s\n", kc, lname, kname);
            return true;
        }
        else {
            /* Override/replace: drop the old name before assigning. */
            if (report)
                log_warn(info->ctx,
                         "Multiple names for keycode %d; "
                         "Using %s, ignoring %s\n", kc, kname, lname);
            darray_item(info->key_names, kc) = XKB_ATOM_NONE;
        }
    }

    /* There's already a key with this name. */
    old_kc = FindKeyByName(info, name);
    if (old_kc != XKB_KEYCODE_INVALID && old_kc != kc) {
        const char *kname = KeyNameText(info->ctx, name);

        if (merge == MERGE_OVERRIDE) {
            /* The new keycode wins; unbind the name from the old one. */
            darray_item(info->key_names, old_kc) = XKB_ATOM_NONE;
            if (report)
                log_warn(info->ctx,
                         "Key name %s assigned to multiple keys; "
                         "Using %d, ignoring %d\n", kname, kc, old_kc);
        }
        else {
            if (report)
                log_vrb(info->ctx, 3,
                        "Key name %s assigned to multiple keys; "
                        "Using %d, ignoring %d\n", kname, old_kc, kc);
            return true;
        }
    }

    darray_item(info->key_names, kc) = name;
    return true;
}
/***====================================================================***/
static bool
HandleAliasDef(KeyNamesInfo *info, KeyAliasDef *def, enum merge_mode merge);
/*
 * Merge the result of a processed include ("from") into the including
 * section ("into").  When "into" is still empty, ownership of the arrays
 * is stolen from "from" (which is reset); otherwise entries are added one
 * by one with the usual conflict resolution.
 */
static void
MergeIncludedKeycodes(KeyNamesInfo *into, KeyNamesInfo *from,
                      enum merge_mode merge)
{
    if (from->errorCount > 0) {
        into->errorCount += from->errorCount;
        return;
    }

    if (into->name == NULL) {
        into->name = from->name;
        from->name = NULL;
    }

    /* Merge key names. */
    if (darray_empty(into->key_names)) {
        /* Steal the whole array, including the keycode range. */
        into->key_names = from->key_names;
        darray_init(from->key_names);
        into->min_key_code = from->min_key_code;
        into->max_key_code = from->max_key_code;
    }
    else {
        if (darray_size(into->key_names) < darray_size(from->key_names))
            darray_resize0(into->key_names, darray_size(from->key_names));

        for (unsigned i = from->min_key_code; i <= from->max_key_code; i++) {
            xkb_atom_t name = darray_item(from->key_names, i);
            if (name == XKB_ATOM_NONE)
                continue;

            if (!AddKeyName(into, i, name, merge, true, false))
                into->errorCount++;
        }
    }

    /* Merge key aliases. */
    if (darray_empty(into->aliases)) {
        into->aliases = from->aliases;
        darray_init(from->aliases);
    }
    else {
        AliasInfo *alias;

        darray_foreach(alias, from->aliases) {
            KeyAliasDef def;

            /* MERGE_DEFAULT keeps the merge mode the alias was declared
             * with; anything else overrides it. */
            def.merge = (merge == MERGE_DEFAULT ? alias->merge : merge);
            def.alias = alias->alias;
            def.real = alias->real;

            if (!HandleAliasDef(into, &def, def.merge))
                into->errorCount++;
        }
    }

    /* Merge LED names. */
    if (into->num_led_names == 0) {
        memcpy(into->led_names, from->led_names,
               sizeof(*from->led_names) * from->num_led_names);
        into->num_led_names = from->num_led_names;
        from->num_led_names = 0;
    }
    else {
        for (xkb_led_index_t idx = 0; idx < from->num_led_names; idx++) {
            LedNameInfo *ledi = &from->led_names[idx];

            if (ledi->name == XKB_ATOM_NONE)
                continue;

            ledi->merge = (merge == MERGE_DEFAULT ? ledi->merge : merge);
            if (!AddLedName(into, ledi->merge, false, ledi, idx))
                into->errorCount++;
        }
    }
}
static void
HandleKeycodesFile(KeyNamesInfo *info, XkbFile *file, enum merge_mode merge);
static bool
HandleIncludeKeycodes(KeyNamesInfo *info, IncludeStmt *include)
{
    /*
     * Process an include statement: compile each file in the include
     * chain into a scratch KeyNamesInfo, merge the parts together, then
     * merge the combined result into @info.  Returns false on failure.
     */
    KeyNamesInfo included;

    InitKeyNamesInfo(&included, info->ctx);
    /* Take over the statement string as the section name. */
    included.name = include->stmt;
    include->stmt = NULL;

    for (IncludeStmt *stmt = include; stmt; stmt = stmt->next_incl) {
        KeyNamesInfo next_incl;
        XkbFile *file;

        file = ProcessIncludeFile(info->ctx, stmt, FILE_TYPE_KEYCODES);
        if (!file) {
            /* A missing include file is fatal for this section. */
            info->errorCount += 10;
            ClearKeyNamesInfo(&included);
            return false;
        }

        InitKeyNamesInfo(&next_incl, info->ctx);

        HandleKeycodesFile(&next_incl, file, MERGE_OVERRIDE);
        MergeIncludedKeycodes(&included, &next_incl, stmt->merge);

        ClearKeyNamesInfo(&next_incl);
        FreeXkbFile(file);
    }

    MergeIncludedKeycodes(info, &included, include->merge);
    ClearKeyNamesInfo(&included);

    return (info->errorCount == 0);
}
/*
 * Handle a single "<NAME> = keycode" statement.  A merge mode given on
 * the statement itself takes precedence over the section default, with
 * statement-level "replace" downgraded to "override".
 */
static bool
HandleKeycodeDef(KeyNamesInfo *info, KeycodeDef *stmt, enum merge_mode merge)
{
    if (stmt->merge != MERGE_DEFAULT)
        merge = (stmt->merge == MERGE_REPLACE) ? MERGE_OVERRIDE : stmt->merge;

    /* Reject keycodes outside the representable range. */
    if (stmt->value < 0 || stmt->value > XKB_KEYCODE_MAX) {
        log_err(info->ctx,
                "Illegal keycode %lld: must be between 0..%u; "
                "Key ignored\n", (long long) stmt->value, XKB_KEYCODE_MAX);
        return false;
    }

    return AddKeyName(info, stmt->value, stmt->name, merge, false, true);
}
static bool
HandleAliasDef(KeyNamesInfo *info, KeyAliasDef *def, enum merge_mode merge)
{
    /*
     * Record the alias def->alias -> def->real.  If the alias name is
     * already defined the clash is resolved according to @merge;
     * otherwise a new entry is appended.  Always returns true.
     */
    AliasInfo *old, new;

    darray_foreach(old, info->aliases) {
        if (old->alias == def->alias) {
            if (def->real == old->real) {
                /* Exact duplicate: harmless, mention it quietly. */
                log_vrb(info->ctx, 1,
                        "Alias of %s for %s declared more than once; "
                        "First definition ignored\n",
                        KeyNameText(info->ctx, def->alias),
                        KeyNameText(info->ctx, def->real));
            }
            else {
                /* Conflicting targets: AUGMENT keeps the old target,
                 * any other mode takes the new one. */
                xkb_atom_t use, ignore;

                use = (merge == MERGE_AUGMENT ? old->real : def->real);
                ignore = (merge == MERGE_AUGMENT ? def->real : old->real);

                log_warn(info->ctx,
                         "Multiple definitions for alias %s; "
                         "Using %s, ignoring %s\n",
                         KeyNameText(info->ctx, old->alias),
                         KeyNameText(info->ctx, use),
                         KeyNameText(info->ctx, ignore));

                old->real = use;
            }

            old->merge = merge;
            return true;
        }
    }

    InitAliasInfo(&new, merge, def->alias, def->real);
    darray_append(info->aliases, new);
    return true;
}
/*
 * Handle a variable assignment inside a keycodes section.  Only the
 * "minimum"/"maximum" fields are recognized, and even those are ignored
 * because the bounds are always computed from the actual key codes.
 */
static bool
HandleKeyNameVar(KeyNamesInfo *info, VarDef *stmt)
{
    const char *elem, *field;
    ExprDef *arrayNdx;

    if (!ExprResolveLhs(info->ctx, stmt->name, &elem, &field, &arrayNdx))
        return false;

    if (elem) {
        log_err(info->ctx, "Unknown element %s encountered; "
                "Default for field %s ignored\n", elem, field);
        return false;
    }

    /* We ignore explicit min/max statements, we always use computed. */
    if (istreq(field, "minimum") || istreq(field, "maximum"))
        return true;

    log_err(info->ctx, "Unknown field encountered; "
            "Assignment to field %s ignored\n", field);
    return false;
}
/*
 * Handle an "indicator N = "name";" statement.  Validates the 1-based
 * index, resolves the name expression, and records the LED name.
 */
static bool
HandleLedNameDef(KeyNamesInfo *info, LedNameDef *def,
                 enum merge_mode merge)
{
    if (def->ndx < 1 || def->ndx > XKB_MAX_LEDS) {
        info->errorCount++;
        log_err(info->ctx,
                "Illegal indicator index (%d) specified; must be between 1 .. %d; "
                "Ignored\n", def->ndx, XKB_MAX_LEDS);
        return false;
    }

    xkb_atom_t resolved;
    if (!ExprResolveString(info->ctx, def->name, &resolved)) {
        /* Render the index so the error message can identify the LED. */
        char buf[20];
        snprintf(buf, sizeof(buf), "%u", def->ndx);
        info->errorCount++;
        return ReportBadType(info->ctx, "indicator", "name", buf, "string");
    }

    LedNameInfo entry;
    entry.merge = merge;
    entry.name = resolved;

    /* Store under the 0-based index. */
    return AddLedName(info, merge, true, &entry, def->ndx - 1);
}
static void
HandleKeycodesFile(KeyNamesInfo *info, XkbFile *file, enum merge_mode merge)
{
    /*
     * Walk every statement of a keycodes file and dispatch it to the
     * matching handler.  Errors are counted in info->errorCount, and
     * parsing is abandoned once more than 10 have accumulated.
     */
    bool ok;

    /* Replace any previous section name with this file's name. */
    free(info->name);
    info->name = strdup_safe(file->name);

    for (ParseCommon *stmt = file->defs; stmt; stmt = stmt->next) {
        switch (stmt->type) {
        case STMT_INCLUDE:
            ok = HandleIncludeKeycodes(info, (IncludeStmt *) stmt);
            break;
        case STMT_KEYCODE:
            ok = HandleKeycodeDef(info, (KeycodeDef *) stmt, merge);
            break;
        case STMT_ALIAS:
            ok = HandleAliasDef(info, (KeyAliasDef *) stmt, merge);
            break;
        case STMT_VAR:
            ok = HandleKeyNameVar(info, (VarDef *) stmt);
            break;
        case STMT_LED_NAME:
            ok = HandleLedNameDef(info, (LedNameDef *) stmt, merge);
            break;
        default:
            /* Any other statement type is illegal in a keycodes file. */
            log_err(info->ctx,
                    "Keycode files may define key and indicator names only; "
                    "Ignoring %s\n", stmt_type_to_string(stmt->type));
            ok = false;
            break;
        }

        if (!ok)
            info->errorCount++;

        if (info->errorCount > 10) {
            log_err(info->ctx, "Abandoning keycodes file \"%s\"\n",
                    file->name);
            break;
        }
    }
}
/***====================================================================***/
/*
 * Allocate the keymap's key table and populate keycodes and names from
 * the compiled info.  Returns false only on allocation failure.
 */
static bool
CopyKeyNamesToKeymap(struct xkb_keymap *keymap, KeyNamesInfo *info)
{
    xkb_keycode_t min_kc = info->min_key_code;
    xkb_keycode_t max_kc = info->max_key_code;

    /* If the keymap has no keys, let's just use the safest pair we know. */
    if (min_kc == XKB_KEYCODE_INVALID) {
        min_kc = 8;
        max_kc = 255;
    }

    struct xkb_key *table = calloc(max_kc + 1, sizeof(*table));
    if (!table)
        return false;

    for (xkb_keycode_t kc = min_kc; kc <= max_kc; kc++)
        table[kc].keycode = kc;

    /* Names only exist for codes actually seen in the source. */
    for (xkb_keycode_t kc = info->min_key_code; kc <= info->max_key_code; kc++)
        table[kc].name = darray_item(info->key_names, kc);

    keymap->min_key_code = min_kc;
    keymap->max_key_code = max_kc;
    keymap->keys = table;
    return true;
}
static bool
CopyKeyAliasesToKeymap(struct xkb_keymap *keymap, KeyNamesInfo *info)
{
    /*
     * Validate the collected aliases against the now-complete key table,
     * then copy the survivors into the keymap.  Invalid entries are
     * marked by setting ->real to XKB_ATOM_NONE in the first pass and
     * skipped in the second.  Returns false only on allocation failure.
     */
    AliasInfo *alias;
    unsigned i, num_key_aliases;
    struct xkb_key_alias *key_aliases;

    /*
     * Do some sanity checking on the aliases. We can't do it before
     * because keys and their aliases may be added out-of-order.
     */
    num_key_aliases = 0;
    darray_foreach(alias, info->aliases) {
        /* Check that ->real is a key. */
        if (!XkbKeyByName(keymap, alias->real, false)) {
            log_vrb(info->ctx, 5,
                    "Attempt to alias %s to non-existent key %s; Ignored\n",
                    KeyNameText(info->ctx, alias->alias),
                    KeyNameText(info->ctx, alias->real));
            alias->real = XKB_ATOM_NONE;
            continue;
        }

        /* Check that ->alias is not a key. */
        if (XkbKeyByName(keymap, alias->alias, false)) {
            log_vrb(info->ctx, 5,
                    "Attempt to create alias with the name of a real key; "
                    "Alias \"%s = %s\" ignored\n",
                    KeyNameText(info->ctx, alias->alias),
                    KeyNameText(info->ctx, alias->real));
            alias->real = XKB_ATOM_NONE;
            continue;
        }

        num_key_aliases++;
    }

    /* Copy key aliases. */
    key_aliases = NULL;
    if (num_key_aliases > 0) {
        key_aliases = calloc(num_key_aliases, sizeof(*key_aliases));
        if (!key_aliases)
            return false;

        i = 0;
        darray_foreach(alias, info->aliases) {
            /* Entries invalidated above are skipped here. */
            if (alias->real != XKB_ATOM_NONE) {
                key_aliases[i].alias = alias->alias;
                key_aliases[i].real = alias->real;
                i++;
            }
        }
    }

    keymap->num_key_aliases = num_key_aliases;
    keymap->key_aliases = key_aliases;
    return true;
}
/*
 * Copy the compiled LED names into the keymap.  Unnamed slots are left
 * untouched.  Never fails.
 */
static bool
CopyLedNamesToKeymap(struct xkb_keymap *keymap, KeyNamesInfo *info)
{
    keymap->num_leds = info->num_led_names;

    for (xkb_led_index_t i = 0; i < info->num_led_names; i++) {
        const LedNameInfo *src = &info->led_names[i];

        if (src->name != XKB_ATOM_NONE)
            keymap->leds[i].name = src->name;
    }

    return true;
}
/*
 * Transfer everything gathered from the keycodes section into the
 * keymap.  This function trashes keymap on error, but that's OK.
 */
static bool
CopyKeyNamesInfoToKeymap(struct xkb_keymap *keymap, KeyNamesInfo *info)
{
    if (!CopyKeyNamesToKeymap(keymap, info))
        return false;
    if (!CopyKeyAliasesToKeymap(keymap, info))
        return false;
    if (!CopyLedNamesToKeymap(keymap, info))
        return false;

    keymap->keycodes_section_name = strdup_safe(info->name);
    XkbEscapeMapName(keymap->keycodes_section_name);
    return true;
}
/***====================================================================***/
/*
 * Entry point: compile a keycodes file into @keymap.  Returns false if
 * parsing produced any errors or the results could not be copied.
 */
bool
CompileKeycodes(XkbFile *file, struct xkb_keymap *keymap,
                enum merge_mode merge)
{
    KeyNamesInfo info;

    InitKeyNamesInfo(&info, keymap->ctx);
    HandleKeycodesFile(&info, file, merge);

    /* Only copy into the keymap when parsing was error-free. */
    bool ok = (info.errorCount == 0) &&
              CopyKeyNamesInfoToKeymap(keymap, &info);

    ClearKeyNamesInfo(&info);
    return ok;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_321_0 |
crossvul-cpp_data_good_3154_0 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* The IP to API glue.
*
* Authors: see ip.c
*
* Fixes:
* Many : Split from ip.c , see ip.c for history.
* Martin Mares : TOS setting fixed.
* Alan Cox : Fixed a couple of oopses in Martin's
* TOS tweaks.
* Mike McLagan : Routing by source
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/tcp_states.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/netfilter.h>
#include <linux/route.h>
#include <linux/mroute.h>
#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/compat.h>
#include <net/checksum.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/ip_fib.h>
#include <linux/errqueue.h>
#include <linux/uaccess.h>
/*
* SOL_IP control messages.
*/
static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb)
{
struct in_pktinfo info = *PKTINFO_SKB_CB(skb);
info.ipi_addr.s_addr = ip_hdr(skb)->daddr;
put_cmsg(msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
}
static void ip_cmsg_recv_ttl(struct msghdr *msg, struct sk_buff *skb)
{
int ttl = ip_hdr(skb)->ttl;
put_cmsg(msg, SOL_IP, IP_TTL, sizeof(int), &ttl);
}
static void ip_cmsg_recv_tos(struct msghdr *msg, struct sk_buff *skb)
{
put_cmsg(msg, SOL_IP, IP_TOS, 1, &ip_hdr(skb)->tos);
}
/* Emit an IP_RECVOPTS ancillary message with the raw IP options, which
 * sit directly after the fixed IP header. */
static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
{
	unsigned int optlen = IPCB(skb)->opt.optlen;

	if (optlen)
		put_cmsg(msg, SOL_IP, IP_RECVOPTS, optlen, ip_hdr(skb) + 1);
}
/* Emit an IP_RETOPTS ancillary message: the received IP options rewritten
 * into the form a reply would use. */
static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
{
	/* 40 bytes is the maximum size of IP options. */
	unsigned char optbuf[sizeof(struct ip_options) + 40];
	struct ip_options *opt = (struct ip_options *)optbuf;

	if (IPCB(skb)->opt.optlen == 0)
		return;

	if (ip_options_echo(opt, skb)) {
		/* Could not reconstruct the options: report control-data
		 * truncation instead of delivering garbage. */
		msg->msg_flags |= MSG_CTRUNC;
		return;
	}
	ip_options_undo(opt);

	put_cmsg(msg, SOL_IP, IP_RETOPTS, opt->optlen, opt->__data);
}
/* Emit an IP_RECVFRAGSIZE ancillary message; frag_max_size is only
 * non-zero for datagrams that were reassembled from fragments. */
static void ip_cmsg_recv_fragsize(struct msghdr *msg, struct sk_buff *skb)
{
	int val = IPCB(skb)->frag_max_size;

	if (val)
		put_cmsg(msg, SOL_IP, IP_RECVFRAGSIZE, sizeof(val), &val);
}
/* Emit an IP_CHECKSUM ancillary message carrying the packet checksum.
 * NOTE(review): @offset appears to be the amount of payload already
 * consumed by the caller, whose partial checksum is subtracted out —
 * confirm against the UDP recvmsg path. */
static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
				  int tlen, int offset)
{
	__wsum csum = skb->csum;

	/* Only meaningful when the device provided a full checksum. */
	if (skb->ip_summed != CHECKSUM_COMPLETE)
		return;

	if (offset != 0)
		csum = csum_sub(csum,
				csum_partial(skb_transport_header(skb) + tlen,
					     offset, 0));

	put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
}
/* Emit an SCM_SECURITY ancillary message with the LSM security context
 * of the packet's peer.  Silently does nothing if the LSM declines. */
static void ip_cmsg_recv_security(struct msghdr *msg, struct sk_buff *skb)
{
	char *secdata;
	u32 seclen, secid;
	int err;

	err = security_socket_getpeersec_dgram(NULL, skb, &secid);
	if (err)
		return;

	err = security_secid_to_secctx(secid, &secdata, &seclen);
	if (err)
		return;

	put_cmsg(msg, SOL_IP, SCM_SECURITY, seclen, secdata);
	/* secdata was allocated by the LSM; hand it back. */
	security_release_secctx(secdata, seclen);
}
/* Emit an IP_ORIGDSTADDR ancillary message with the original destination
 * address and port of the packet (useful behind transparent proxying). */
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
	struct sockaddr_in sin;
	const struct iphdr *iph = ip_hdr(skb);
	__be16 *ports = (__be16 *)skb_transport_header(skb);

	/* Bail out unless at least the two 16-bit port fields are within
	 * the packet. */
	if (skb_transport_offset(skb) + 4 > (int)skb->len)
		return;

	/* All current transport protocols have the port numbers in the
	 * first four bytes of the transport header and this function is
	 * written with this assumption in mind.
	 */
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = iph->daddr;
	sin.sin_port = ports[1];	/* second field = destination port */
	memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

	put_cmsg(msg, SOL_IP, IP_ORIGDSTADDR, sizeof(sin), &sin);
}
/* Fill the control-message buffer of @msg with every ancillary item the
 * socket has requested via inet->cmsg_flags.  Each handled flag is
 * cleared from the local copy so the function can return as soon as no
 * requested items remain. */
void ip_cmsg_recv_offset(struct msghdr *msg, struct sock *sk,
			 struct sk_buff *skb, int tlen, int offset)
{
	struct inet_sock *inet = inet_sk(sk);
	unsigned int flags = inet->cmsg_flags;

	/* Ordered by supposed usage frequency */
	if (flags & IP_CMSG_PKTINFO) {
		ip_cmsg_recv_pktinfo(msg, skb);

		flags &= ~IP_CMSG_PKTINFO;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_TTL) {
		ip_cmsg_recv_ttl(msg, skb);

		flags &= ~IP_CMSG_TTL;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_TOS) {
		ip_cmsg_recv_tos(msg, skb);

		flags &= ~IP_CMSG_TOS;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_RECVOPTS) {
		ip_cmsg_recv_opts(msg, skb);

		flags &= ~IP_CMSG_RECVOPTS;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_RETOPTS) {
		ip_cmsg_recv_retopts(msg, skb);

		flags &= ~IP_CMSG_RETOPTS;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_PASSSEC) {
		ip_cmsg_recv_security(msg, skb);

		flags &= ~IP_CMSG_PASSSEC;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_ORIGDSTADDR) {
		ip_cmsg_recv_dstaddr(msg, skb);

		flags &= ~IP_CMSG_ORIGDSTADDR;
		if (!flags)
			return;
	}

	if (flags & IP_CMSG_CHECKSUM)
		ip_cmsg_recv_checksum(msg, skb, tlen, offset);

	if (flags & IP_CMSG_RECVFRAGSIZE)
		ip_cmsg_recv_fragsize(msg, skb);
}
EXPORT_SYMBOL(ip_cmsg_recv_offset);
/* Parse the sender-supplied control messages of @msg into @ipc for use
 * on the transmit path.  With @allow_ipv6, IPV6_PKTINFO carrying a
 * v4-mapped address is accepted on this IPv4 path as well.
 * Returns 0 on success or a negative errno; on success the caller owns
 * (and must free) any ipc->opt that was allocated. */
int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
		 bool allow_ipv6)
{
	int err, val;
	struct cmsghdr *cmsg;
	struct net *net = sock_net(sk);

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;
#if IS_ENABLED(CONFIG_IPV6)
		if (allow_ipv6 &&
		    cmsg->cmsg_level == SOL_IPV6 &&
		    cmsg->cmsg_type == IPV6_PKTINFO) {
			struct in6_pktinfo *src_info;

			if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
				return -EINVAL;
			src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
			/* Only v4-mapped v6 addresses make sense here. */
			if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
				return -EINVAL;
			ipc->oif = src_info->ipi6_ifindex;
			ipc->addr = src_info->ipi6_addr.s6_addr32[3];
			continue;
		}
#endif
		if (cmsg->cmsg_level == SOL_SOCKET) {
			err = __sock_cmsg_send(sk, msg, cmsg, &ipc->sockc);
			if (err)
				return err;
			continue;
		}

		if (cmsg->cmsg_level != SOL_IP)
			continue;
		switch (cmsg->cmsg_type) {
		case IP_RETOPTS:
			/* err temporarily holds the payload length here. */
			err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));

			/* Our caller is responsible for freeing ipc->opt */
			err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
					     err < 40 ? err : 40);
			if (err)
				return err;
			break;
		case IP_PKTINFO:
		{
			struct in_pktinfo *info;
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct in_pktinfo)))
				return -EINVAL;
			info = (struct in_pktinfo *)CMSG_DATA(cmsg);
			ipc->oif = info->ipi_ifindex;
			ipc->addr = info->ipi_spec_dst.s_addr;
			break;
		}
		case IP_TTL:
			if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
				return -EINVAL;
			val = *(int *)CMSG_DATA(cmsg);
			if (val < 1 || val > 255)
				return -EINVAL;
			ipc->ttl = val;
			break;
		case IP_TOS:
			/* TOS may be passed as either an int or a byte. */
			if (cmsg->cmsg_len == CMSG_LEN(sizeof(int)))
				val = *(int *)CMSG_DATA(cmsg);
			else if (cmsg->cmsg_len == CMSG_LEN(sizeof(u8)))
				val = *(u8 *)CMSG_DATA(cmsg);
			else
				return -EINVAL;
			if (val < 0 || val > 255)
				return -EINVAL;
			ipc->tos = val;
			ipc->priority = rt_tos2priority(ipc->tos);
			break;

		default:
			return -EINVAL;
		}
	}
	return 0;
}
/* Special input handler for packets caught by router alert option.
They are selected only by protocol field, and then processed likely
local ones; but only if someone wants them! Otherwise, router
not running rsvpd will kill RSVP.
It is user level problem, what it will make with them.
I have no idea, how it will masquearde or NAT them (it is joke, joke :-)),
but receiver should be enough clever f.e. to forward mtrace requests,
sent to multicast group to reach destination designated router.
*/
struct ip_ra_chain __rcu *ip_ra_chain;
static DEFINE_SPINLOCK(ip_ra_lock);
/* RCU callback: drop the saved socket reference and free the router
 * alert chain node once no reader can still see it. */
static void ip_ra_destroy_rcu(struct rcu_head *head)
{
	struct ip_ra_chain *node;

	node = container_of(head, struct ip_ra_chain, rcu);
	sock_put(node->saved_sk);
	kfree(node);
}
/* Register (@on != 0) or unregister a raw socket on the router-alert
 * chain, so it receives packets carrying the IP router alert option.
 * Returns 0, -EINVAL for unsuitable sockets, -EADDRINUSE if already
 * registered, or -ENOBUFS on allocation failure / not found. */
int ip_ra_control(struct sock *sk, unsigned char on,
		  void (*destructor)(struct sock *))
{
	struct ip_ra_chain *ra, *new_ra;
	struct ip_ra_chain __rcu **rap;

	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->inet_num == IPPROTO_RAW)
		return -EINVAL;

	/* Allocate before taking the lock; may end up unused. */
	new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;

	spin_lock_bh(&ip_ra_lock);
	for (rap = &ip_ra_chain;
	     (ra = rcu_dereference_protected(*rap,
					     lockdep_is_held(&ip_ra_lock))) != NULL;
	     rap = &ra->next) {
		if (ra->sk == sk) {
			if (on) {
				spin_unlock_bh(&ip_ra_lock);
				kfree(new_ra);
				return -EADDRINUSE;
			}
			/* dont let ip_call_ra_chain() use sk again */
			ra->sk = NULL;
			RCU_INIT_POINTER(*rap, ra->next);
			spin_unlock_bh(&ip_ra_lock);

			if (ra->destructor)
				ra->destructor(sk);
			/*
			 * Delay sock_put(sk) and kfree(ra) after one rcu grace
			 * period. This guarantee ip_call_ra_chain() dont need
			 * to mess with socket refcounts.
			 */
			ra->saved_sk = sk;
			call_rcu(&ra->rcu, ip_ra_destroy_rcu);
			return 0;
		}
	}
	if (!new_ra) {
		spin_unlock_bh(&ip_ra_lock);
		return -ENOBUFS;
	}
	/* Not found: append the new entry at the tail (ra == NULL here). */
	new_ra->sk = sk;
	new_ra->destructor = destructor;

	RCU_INIT_POINTER(new_ra->next, ra);
	rcu_assign_pointer(*rap, new_ra);
	sock_hold(sk);
	spin_unlock_bh(&ip_ra_lock);

	return 0;
}
/* Queue an ICMP-originated error (clone of the ICMP skb) on the socket's
 * error queue for later retrieval via MSG_ERRQUEUE. */
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
		   __be16 port, u32 info, u8 *payload)
{
	struct sock_exterr_skb *serr;

	/* Clone: the original ICMP skb continues through the stack. */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_ICMP;
	serr->ee.ee_type = icmp_hdr(skb)->type;
	serr->ee.ee_code = icmp_hdr(skb)->code;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	/* Offset of the offending packet's daddr (the inner IP header
	 * follows the ICMP header) relative to the network header. */
	serr->addr_offset = (u8 *)&(((struct iphdr *)(icmp_hdr(skb) + 1))->daddr) -
				   skb_network_header(skb);
	serr->port = port;

	if (skb_pull(skb, payload - skb->data)) {
		skb_reset_transport_header(skb);
		if (sock_queue_err_skb(sk, skb) == 0)
			return;
	}
	kfree_skb(skb);
}
/* Queue a locally detected error on the socket's error queue, using a
 * freshly built minimal skb holding only an IP header with the
 * destination address.  No-op unless IP_RECVERR is enabled. */
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sock_exterr_skb *serr;
	struct iphdr *iph;
	struct sk_buff *skb;

	if (!inet->recverr)
		return;

	skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb_put(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->daddr = daddr;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
	serr->ee.ee_type = 0;
	serr->ee.ee_code = 0;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	/* Point readers at the daddr we just stored. */
	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
	serr->port = port;

	/* Leave no payload: the header itself is the whole message. */
	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}
/* For some errors we have valid addr_offset even with zero payload and
* zero port. Also, addr_offset should be supported if port is set.
*/
static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
{
return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
}
/* IPv4 supports cmsg on all imcp errors and some timestamps
*
* Timestamp code paths do not initialize the fields expected by cmsg:
* the PKTINFO fields in skb->cb[]. Fill those in here.
*/
/* Decide whether cmsg data may be attached to this error-queue skb;
 * for timestamp skbs, also fill in the PKTINFO fields in skb->cb[]
 * that the timestamping paths leave uninitialized. */
static bool ipv4_datagram_support_cmsg(const struct sock *sk,
				       struct sk_buff *skb,
				       int ee_origin)
{
	struct in_pktinfo *info;

	if (ee_origin == SO_EE_ORIGIN_ICMP)
		return true;

	if (ee_origin == SO_EE_ORIGIN_LOCAL)
		return false;

	/* Support IP_PKTINFO on tstamp packets if requested, to correlate
	 * timestamp with egress dev. Not possible for packets without dev
	 * or without payload (SOF_TIMESTAMPING_OPT_TSONLY).
	 */
	if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) ||
	    (!skb->dev))
		return false;

	info = PKTINFO_SKB_CB(skb);
	info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr;
	info->ipi_ifindex = skb->dev->ifindex;
	return true;
}
/*
* Handle MSG_ERRQUEUE
*/
/* Dequeue one entry from the socket's error queue and deliver it to
 * userspace: payload (possibly truncated), the offender's address, and
 * an IP_RECVERR control message.  Returns bytes copied or -EAGAIN when
 * the queue is empty. */
int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb;
	DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name);
	struct {
		struct sock_extended_err ee;
		struct sockaddr_in	 offender;
	} errhdr;
	int err;
	int copied;

	WARN_ON_ONCE(sk->sk_family == AF_INET6);

	err = -EAGAIN;
	skb = sock_dequeue_err_skb(sk);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	if (unlikely(err)) {
		kfree_skb(skb);
		return err;
	}
	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);

	/* Fill in the caller's address buffer when the entry carries one. */
	if (sin && ipv4_datagram_support_addr(serr)) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
						   serr->addr_offset);
		sin->sin_port = serr->port;
		memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
		*addr_len = sizeof(*sin);
	}

	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
	/* Reuse sin to build the offender address inside errhdr. */
	sin = &errhdr.offender;
	memset(sin, 0, sizeof(*sin));

	if (ipv4_datagram_support_cmsg(sk, skb, serr->ee.ee_origin)) {
		sin->sin_family = AF_INET;
		sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
		if (inet_sk(sk)->cmsg_flags)
			ip_cmsg_recv(msg, skb);
	}

	put_cmsg(msg, SOL_IP, IP_RECVERR, sizeof(errhdr), &errhdr);

	/* Now we could try to dump offended packet options */

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	consume_skb(skb);
out:
	return err;
}
/*
* Socket option code for IP. This is the end of the line after any
* TCP,UDP etc options on an IP socket.
*/
/* Multicast membership/filter options touch device state and are taken
 * under the RTNL lock by do_ip_setsockopt(); plain flag options are not. */
static bool setsockopt_needs_rtnl(int optname)
{
	switch (optname) {
	case IP_ADD_MEMBERSHIP:
	case IP_ADD_SOURCE_MEMBERSHIP:
	case IP_BLOCK_SOURCE:
	case IP_DROP_MEMBERSHIP:
	case IP_DROP_SOURCE_MEMBERSHIP:
	case IP_MSFILTER:
	case IP_UNBLOCK_SOURCE:
	case MCAST_BLOCK_SOURCE:
	case MCAST_MSFILTER:
	case MCAST_JOIN_GROUP:
	case MCAST_JOIN_SOURCE_GROUP:
	case MCAST_LEAVE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
	case MCAST_UNBLOCK_SOURCE:
		return true;
	default:
		return false;
	}
}
static int do_ip_setsockopt(struct sock *sk, int level,
int optname, char __user *optval, unsigned int optlen)
{
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
int val = 0, err;
bool needs_rtnl = setsockopt_needs_rtnl(optname);
switch (optname) {
case IP_PKTINFO:
case IP_RECVTTL:
case IP_RECVOPTS:
case IP_RECVTOS:
case IP_RETOPTS:
case IP_TOS:
case IP_TTL:
case IP_HDRINCL:
case IP_MTU_DISCOVER:
case IP_RECVERR:
case IP_ROUTER_ALERT:
case IP_FREEBIND:
case IP_PASSSEC:
case IP_TRANSPARENT:
case IP_MINTTL:
case IP_NODEFRAG:
case IP_BIND_ADDRESS_NO_PORT:
case IP_UNICAST_IF:
case IP_MULTICAST_TTL:
case IP_MULTICAST_ALL:
case IP_MULTICAST_LOOP:
case IP_RECVORIGDSTADDR:
case IP_CHECKSUM:
case IP_RECVFRAGSIZE:
if (optlen >= sizeof(int)) {
if (get_user(val, (int __user *) optval))
return -EFAULT;
} else if (optlen >= sizeof(char)) {
unsigned char ucval;
if (get_user(ucval, (unsigned char __user *) optval))
return -EFAULT;
val = (int) ucval;
}
}
/* If optlen==0, it is equivalent to val == 0 */
if (ip_mroute_opt(optname))
return ip_mroute_setsockopt(sk, optname, optval, optlen);
err = 0;
if (needs_rtnl)
rtnl_lock();
lock_sock(sk);
switch (optname) {
case IP_OPTIONS:
{
struct ip_options_rcu *old, *opt = NULL;
if (optlen > 40)
goto e_inval;
err = ip_options_get_from_user(sock_net(sk), &opt,
optval, optlen);
if (err)
break;
old = rcu_dereference_protected(inet->inet_opt,
lockdep_sock_is_held(sk));
if (inet->is_icsk) {
struct inet_connection_sock *icsk = inet_csk(sk);
#if IS_ENABLED(CONFIG_IPV6)
if (sk->sk_family == PF_INET ||
(!((1 << sk->sk_state) &
(TCPF_LISTEN | TCPF_CLOSE)) &&
inet->inet_daddr != LOOPBACK4_IPV6)) {
#endif
if (old)
icsk->icsk_ext_hdr_len -= old->opt.optlen;
if (opt)
icsk->icsk_ext_hdr_len += opt->opt.optlen;
icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie);
#if IS_ENABLED(CONFIG_IPV6)
}
#endif
}
rcu_assign_pointer(inet->inet_opt, opt);
if (old)
kfree_rcu(old, rcu);
break;
}
case IP_PKTINFO:
if (val)
inet->cmsg_flags |= IP_CMSG_PKTINFO;
else
inet->cmsg_flags &= ~IP_CMSG_PKTINFO;
break;
case IP_RECVTTL:
if (val)
inet->cmsg_flags |= IP_CMSG_TTL;
else
inet->cmsg_flags &= ~IP_CMSG_TTL;
break;
case IP_RECVTOS:
if (val)
inet->cmsg_flags |= IP_CMSG_TOS;
else
inet->cmsg_flags &= ~IP_CMSG_TOS;
break;
case IP_RECVOPTS:
if (val)
inet->cmsg_flags |= IP_CMSG_RECVOPTS;
else
inet->cmsg_flags &= ~IP_CMSG_RECVOPTS;
break;
case IP_RETOPTS:
if (val)
inet->cmsg_flags |= IP_CMSG_RETOPTS;
else
inet->cmsg_flags &= ~IP_CMSG_RETOPTS;
break;
case IP_PASSSEC:
if (val)
inet->cmsg_flags |= IP_CMSG_PASSSEC;
else
inet->cmsg_flags &= ~IP_CMSG_PASSSEC;
break;
case IP_RECVORIGDSTADDR:
if (val)
inet->cmsg_flags |= IP_CMSG_ORIGDSTADDR;
else
inet->cmsg_flags &= ~IP_CMSG_ORIGDSTADDR;
break;
case IP_CHECKSUM:
if (val) {
if (!(inet->cmsg_flags & IP_CMSG_CHECKSUM)) {
inet_inc_convert_csum(sk);
inet->cmsg_flags |= IP_CMSG_CHECKSUM;
}
} else {
if (inet->cmsg_flags & IP_CMSG_CHECKSUM) {
inet_dec_convert_csum(sk);
inet->cmsg_flags &= ~IP_CMSG_CHECKSUM;
}
}
break;
case IP_RECVFRAGSIZE:
if (sk->sk_type != SOCK_RAW && sk->sk_type != SOCK_DGRAM)
goto e_inval;
if (val)
inet->cmsg_flags |= IP_CMSG_RECVFRAGSIZE;
else
inet->cmsg_flags &= ~IP_CMSG_RECVFRAGSIZE;
break;
case IP_TOS: /* This sets both TOS and Precedence */
if (sk->sk_type == SOCK_STREAM) {
val &= ~INET_ECN_MASK;
val |= inet->tos & INET_ECN_MASK;
}
if (inet->tos != val) {
inet->tos = val;
sk->sk_priority = rt_tos2priority(val);
sk_dst_reset(sk);
}
break;
case IP_TTL:
if (optlen < 1)
goto e_inval;
if (val != -1 && (val < 1 || val > 255))
goto e_inval;
inet->uc_ttl = val;
break;
case IP_HDRINCL:
if (sk->sk_type != SOCK_RAW) {
err = -ENOPROTOOPT;
break;
}
inet->hdrincl = val ? 1 : 0;
break;
case IP_NODEFRAG:
if (sk->sk_type != SOCK_RAW) {
err = -ENOPROTOOPT;
break;
}
inet->nodefrag = val ? 1 : 0;
break;
case IP_BIND_ADDRESS_NO_PORT:
inet->bind_address_no_port = val ? 1 : 0;
break;
case IP_MTU_DISCOVER:
if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
goto e_inval;
inet->pmtudisc = val;
break;
case IP_RECVERR:
inet->recverr = !!val;
if (!val)
skb_queue_purge(&sk->sk_error_queue);
break;
case IP_MULTICAST_TTL:
if (sk->sk_type == SOCK_STREAM)
goto e_inval;
if (optlen < 1)
goto e_inval;
if (val == -1)
val = 1;
if (val < 0 || val > 255)
goto e_inval;
inet->mc_ttl = val;
break;
case IP_MULTICAST_LOOP:
if (optlen < 1)
goto e_inval;
inet->mc_loop = !!val;
break;
case IP_UNICAST_IF:
{
struct net_device *dev = NULL;
int ifindex;
if (optlen != sizeof(int))
goto e_inval;
ifindex = (__force int)ntohl((__force __be32)val);
if (ifindex == 0) {
inet->uc_index = 0;
err = 0;
break;
}
dev = dev_get_by_index(sock_net(sk), ifindex);
err = -EADDRNOTAVAIL;
if (!dev)
break;
dev_put(dev);
err = -EINVAL;
if (sk->sk_bound_dev_if)
break;
inet->uc_index = ifindex;
err = 0;
break;
}
case IP_MULTICAST_IF:
{
struct ip_mreqn mreq;
struct net_device *dev = NULL;
if (sk->sk_type == SOCK_STREAM)
goto e_inval;
/*
* Check the arguments are allowable
*/
if (optlen < sizeof(struct in_addr))
goto e_inval;
err = -EFAULT;
if (optlen >= sizeof(struct ip_mreqn)) {
if (copy_from_user(&mreq, optval, sizeof(mreq)))
break;
} else {
memset(&mreq, 0, sizeof(mreq));
if (optlen >= sizeof(struct ip_mreq)) {
if (copy_from_user(&mreq, optval,
sizeof(struct ip_mreq)))
break;
} else if (optlen >= sizeof(struct in_addr)) {
if (copy_from_user(&mreq.imr_address, optval,
sizeof(struct in_addr)))
break;
}
}
if (!mreq.imr_ifindex) {
if (mreq.imr_address.s_addr == htonl(INADDR_ANY)) {
inet->mc_index = 0;
inet->mc_addr = 0;
err = 0;
break;
}
dev = ip_dev_find(sock_net(sk), mreq.imr_address.s_addr);
if (dev)
mreq.imr_ifindex = dev->ifindex;
} else
dev = dev_get_by_index(sock_net(sk), mreq.imr_ifindex);
err = -EADDRNOTAVAIL;
if (!dev)
break;
dev_put(dev);
err = -EINVAL;
if (sk->sk_bound_dev_if &&
mreq.imr_ifindex != sk->sk_bound_dev_if)
break;
inet->mc_index = mreq.imr_ifindex;
inet->mc_addr = mreq.imr_address.s_addr;
err = 0;
break;
}
case IP_ADD_MEMBERSHIP:
case IP_DROP_MEMBERSHIP:
{
struct ip_mreqn mreq;
err = -EPROTO;
if (inet_sk(sk)->is_icsk)
break;
if (optlen < sizeof(struct ip_mreq))
goto e_inval;
err = -EFAULT;
if (optlen >= sizeof(struct ip_mreqn)) {
if (copy_from_user(&mreq, optval, sizeof(mreq)))
break;
} else {
memset(&mreq, 0, sizeof(mreq));
if (copy_from_user(&mreq, optval, sizeof(struct ip_mreq)))
break;
}
if (optname == IP_ADD_MEMBERSHIP)
err = ip_mc_join_group(sk, &mreq);
else
err = ip_mc_leave_group(sk, &mreq);
break;
}
case IP_MSFILTER:
{
struct ip_msfilter *msf;
if (optlen < IP_MSFILTER_SIZE(0))
goto e_inval;
if (optlen > sysctl_optmem_max) {
err = -ENOBUFS;
break;
}
msf = kmalloc(optlen, GFP_KERNEL);
if (!msf) {
err = -ENOBUFS;
break;
}
err = -EFAULT;
if (copy_from_user(msf, optval, optlen)) {
kfree(msf);
break;
}
/* numsrc >= (1G-4) overflow in 32 bits */
if (msf->imsf_numsrc >= 0x3ffffffcU ||
msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
kfree(msf);
err = -ENOBUFS;
break;
}
if (IP_MSFILTER_SIZE(msf->imsf_numsrc) > optlen) {
kfree(msf);
err = -EINVAL;
break;
}
err = ip_mc_msfilter(sk, msf, 0);
kfree(msf);
break;
}
case IP_BLOCK_SOURCE:
case IP_UNBLOCK_SOURCE:
case IP_ADD_SOURCE_MEMBERSHIP:
case IP_DROP_SOURCE_MEMBERSHIP:
{
struct ip_mreq_source mreqs;
int omode, add;
if (optlen != sizeof(struct ip_mreq_source))
goto e_inval;
if (copy_from_user(&mreqs, optval, sizeof(mreqs))) {
err = -EFAULT;
break;
}
if (optname == IP_BLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 1;
} else if (optname == IP_UNBLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 0;
} else if (optname == IP_ADD_SOURCE_MEMBERSHIP) {
struct ip_mreqn mreq;
mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
mreq.imr_address.s_addr = mreqs.imr_interface;
mreq.imr_ifindex = 0;
err = ip_mc_join_group(sk, &mreq);
if (err && err != -EADDRINUSE)
break;
omode = MCAST_INCLUDE;
add = 1;
} else /* IP_DROP_SOURCE_MEMBERSHIP */ {
omode = MCAST_INCLUDE;
add = 0;
}
err = ip_mc_source(add, omode, sk, &mreqs, 0);
break;
}
case MCAST_JOIN_GROUP:
case MCAST_LEAVE_GROUP:
{
struct group_req greq;
struct sockaddr_in *psin;
struct ip_mreqn mreq;
if (optlen < sizeof(struct group_req))
goto e_inval;
err = -EFAULT;
if (copy_from_user(&greq, optval, sizeof(greq)))
break;
psin = (struct sockaddr_in *)&greq.gr_group;
if (psin->sin_family != AF_INET)
goto e_inval;
memset(&mreq, 0, sizeof(mreq));
mreq.imr_multiaddr = psin->sin_addr;
mreq.imr_ifindex = greq.gr_interface;
if (optname == MCAST_JOIN_GROUP)
err = ip_mc_join_group(sk, &mreq);
else
err = ip_mc_leave_group(sk, &mreq);
break;
}
case MCAST_JOIN_SOURCE_GROUP:
case MCAST_LEAVE_SOURCE_GROUP:
case MCAST_BLOCK_SOURCE:
case MCAST_UNBLOCK_SOURCE:
{
struct group_source_req greqs;
struct ip_mreq_source mreqs;
struct sockaddr_in *psin;
int omode, add;
if (optlen != sizeof(struct group_source_req))
goto e_inval;
if (copy_from_user(&greqs, optval, sizeof(greqs))) {
err = -EFAULT;
break;
}
if (greqs.gsr_group.ss_family != AF_INET ||
greqs.gsr_source.ss_family != AF_INET) {
err = -EADDRNOTAVAIL;
break;
}
psin = (struct sockaddr_in *)&greqs.gsr_group;
mreqs.imr_multiaddr = psin->sin_addr.s_addr;
psin = (struct sockaddr_in *)&greqs.gsr_source;
mreqs.imr_sourceaddr = psin->sin_addr.s_addr;
mreqs.imr_interface = 0; /* use index for mc_source */
if (optname == MCAST_BLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 1;
} else if (optname == MCAST_UNBLOCK_SOURCE) {
omode = MCAST_EXCLUDE;
add = 0;
} else if (optname == MCAST_JOIN_SOURCE_GROUP) {
struct ip_mreqn mreq;
psin = (struct sockaddr_in *)&greqs.gsr_group;
mreq.imr_multiaddr = psin->sin_addr;
mreq.imr_address.s_addr = 0;
mreq.imr_ifindex = greqs.gsr_interface;
err = ip_mc_join_group(sk, &mreq);
if (err && err != -EADDRINUSE)
break;
greqs.gsr_interface = mreq.imr_ifindex;
omode = MCAST_INCLUDE;
add = 1;
} else /* MCAST_LEAVE_SOURCE_GROUP */ {
omode = MCAST_INCLUDE;
add = 0;
}
err = ip_mc_source(add, omode, sk, &mreqs,
greqs.gsr_interface);
break;
}
case MCAST_MSFILTER:
{
struct sockaddr_in *psin;
struct ip_msfilter *msf = NULL;
struct group_filter *gsf = NULL;
int msize, i, ifindex;
if (optlen < GROUP_FILTER_SIZE(0))
goto e_inval;
if (optlen > sysctl_optmem_max) {
err = -ENOBUFS;
break;
}
gsf = kmalloc(optlen, GFP_KERNEL);
if (!gsf) {
err = -ENOBUFS;
break;
}
err = -EFAULT;
if (copy_from_user(gsf, optval, optlen))
goto mc_msf_out;
/* numsrc >= (4G-140)/128 overflow in 32 bits */
if (gsf->gf_numsrc >= 0x1ffffff ||
gsf->gf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
err = -ENOBUFS;
goto mc_msf_out;
}
if (GROUP_FILTER_SIZE(gsf->gf_numsrc) > optlen) {
err = -EINVAL;
goto mc_msf_out;
}
msize = IP_MSFILTER_SIZE(gsf->gf_numsrc);
msf = kmalloc(msize, GFP_KERNEL);
if (!msf) {
err = -ENOBUFS;
goto mc_msf_out;
}
ifindex = gsf->gf_interface;
psin = (struct sockaddr_in *)&gsf->gf_group;
if (psin->sin_family != AF_INET) {
err = -EADDRNOTAVAIL;
goto mc_msf_out;
}
msf->imsf_multiaddr = psin->sin_addr.s_addr;
msf->imsf_interface = 0;
msf->imsf_fmode = gsf->gf_fmode;
msf->imsf_numsrc = gsf->gf_numsrc;
err = -EADDRNOTAVAIL;
for (i = 0; i < gsf->gf_numsrc; ++i) {
psin = (struct sockaddr_in *)&gsf->gf_slist[i];
if (psin->sin_family != AF_INET)
goto mc_msf_out;
msf->imsf_slist[i] = psin->sin_addr.s_addr;
}
kfree(gsf);
gsf = NULL;
err = ip_mc_msfilter(sk, msf, ifindex);
mc_msf_out:
kfree(msf);
kfree(gsf);
break;
}
case IP_MULTICAST_ALL:
if (optlen < 1)
goto e_inval;
if (val != 0 && val != 1)
goto e_inval;
inet->mc_all = val;
break;
case IP_ROUTER_ALERT:
err = ip_ra_control(sk, val ? 1 : 0, NULL);
break;
case IP_FREEBIND:
if (optlen < 1)
goto e_inval;
inet->freebind = !!val;
break;
case IP_IPSEC_POLICY:
case IP_XFRM_POLICY:
err = -EPERM;
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
break;
err = xfrm_user_policy(sk, optname, optval, optlen);
break;
case IP_TRANSPARENT:
if (!!val && !ns_capable(sock_net(sk)->user_ns, CAP_NET_RAW) &&
!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
err = -EPERM;
break;
}
if (optlen < 1)
goto e_inval;
inet->transparent = !!val;
break;
case IP_MINTTL:
if (optlen < 1)
goto e_inval;
if (val < 0 || val > 255)
goto e_inval;
inet->min_ttl = val;
break;
default:
err = -ENOPROTOOPT;
break;
}
release_sock(sk);
if (needs_rtnl)
rtnl_unlock();
return err;
e_inval:
release_sock(sk);
if (needs_rtnl)
rtnl_unlock();
return -EINVAL;
}
/**
 * ipv4_pktinfo_prepare - transfer some info from rtable to skb
 * @sk: socket
 * @skb: buffer
 *
 * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
 * destination in skb->cb[] before dst drop.
 * This way, receiver doesn't make cache line misses to read rtable.
 */
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
{
	struct in_pktinfo *pktinfo = PKTINFO_SKB_CB(skb);
	/* Only fill in the pktinfo if someone will actually consume it:
	 * either the IPv4 socket requested IP_CMSG_PKTINFO, or the
	 * (dual-stack) IPv6 socket requested rx info.
	 */
	bool prepare = (inet_sk(sk)->cmsg_flags & IP_CMSG_PKTINFO) ||
		       ipv6_sk_rxinfo(sk);

	if (prepare && skb_rtable(skb)) {
		/* skb->cb is overloaded: prior to this point it is IP{6}CB
		 * which has interface index (iif) as the first member of the
		 * underlying inet{6}_skb_parm struct. This code then overlays
		 * PKTINFO_SKB_CB and in_pktinfo also has iif as the first
		 * element so the iif is picked up from the prior IPCB. If iif
		 * is the loopback interface, then return the sending interface
		 * (e.g., process binds socket to eth0 for Tx which is
		 * redirected to loopback in the rtable/dst).
		 */
		if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
			pktinfo->ipi_ifindex = inet_iif(skb);

		pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
	} else {
		/* Zero the fields so stale cb[] contents cannot leak to
		 * userspace via the IP_PKTINFO cmsg.
		 */
		pktinfo->ipi_ifindex = 0;
		pktinfo->ipi_spec_dst.s_addr = 0;
	}
	/* We need to keep the dst for __ip_options_echo()
	 * We could restrict the test to opt.ts_needtime || opt.srr,
	 * but the following is good enough as IP options are not often used.
	 */
	if (unlikely(IPCB(skb)->opt.optlen))
		skb_dst_force(skb);
	else
		skb_dst_drop(skb);
}
/*
 * Public SOL_IP setsockopt() entry point.  Delegates to
 * do_ip_setsockopt(); when that reports the option as unknown
 * (-ENOPROTOOPT), the option is offered to netfilter, except for
 * options where -ENOPROTOOPT is a meaningful result in its own right.
 */
int ip_setsockopt(struct sock *sk, int level,
		int optname, char __user *optval, unsigned int optlen)
{
	int ret;

	if (level != SOL_IP)
		return -ENOPROTOOPT;

	ret = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (ret == -ENOPROTOOPT && !ip_mroute_opt(optname) &&
	    optname != IP_HDRINCL &&
	    optname != IP_IPSEC_POLICY &&
	    optname != IP_XFRM_POLICY) {
		lock_sock(sk);
		ret = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
		release_sock(sk);
	}
#endif
	return ret;
}
EXPORT_SYMBOL(ip_setsockopt);
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat entry point for SOL_IP setsockopt().  The MCAST_* group
 * options carry structures whose layout differs between 32- and 64-bit
 * ABIs, so those are translated by compat_mc_setsockopt() (which calls
 * back into the native ip_setsockopt()); all other options take the
 * regular path, with a compat netfilter fallback for unknown options.
 */
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, unsigned int optlen)
{
	int err;

	if (level != SOL_IP)
		return -ENOPROTOOPT;

	/* ABI-sensitive multicast options: translate then re-enter natively. */
	if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
		return compat_mc_setsockopt(sk, level, optname, optval, optlen,
			ip_setsockopt);

	err = do_ip_setsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
	    optname != IP_IPSEC_POLICY &&
	    optname != IP_XFRM_POLICY &&
	    !ip_mroute_opt(optname)) {
		lock_sock(sk);
		err = compat_nf_setsockopt(sk, PF_INET, optname,
					   optval, optlen);
		release_sock(sk);
	}
#endif
	return err;
}
EXPORT_SYMBOL(compat_ip_setsockopt);
#endif
/*
* Get the options. Note for future reference. The GET of IP options gets
* the _received_ ones. The set sets the _sent_ ones.
*/
/*
 * Report whether the given SOL_IP getsockopt() option must be handled
 * under the RTNL lock.  Only the multicast source-filter queries do.
 */
static bool getsockopt_needs_rtnl(int optname)
{
	return optname == IP_MSFILTER || optname == MCAST_MSFILTER;
}
/*
 * Core SOL_IP getsockopt() handler.
 *
 * Locking: takes the socket lock for every option; additionally takes
 * RTNL (before the socket lock) for the multicast source-filter options
 * (see getsockopt_needs_rtnl()).  Options that "return" early instead of
 * "goto out" drop only the socket lock — that is safe because those
 * options are never RTNL options; the RTNL options all exit via "out".
 *
 * Simple scalar options fall through the switch into the common copyout
 * at the bottom; options with structured replies copy to userspace
 * themselves and return directly.
 */
static int do_ip_getsockopt(struct sock *sk, int level, int optname,
		char __user *optval, int __user *optlen, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	bool needs_rtnl = getsockopt_needs_rtnl(optname);
	int val, err = 0;
	int len;

	if (level != SOL_IP)
		return -EOPNOTSUPP;

	/* Multicast-routing options have their own handler. */
	if (ip_mroute_opt(optname))
		return ip_mroute_getsockopt(sk, optname, optval, optlen);

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	/* Lock order: RTNL first, then the socket lock. */
	if (needs_rtnl)
		rtnl_lock();
	lock_sock(sk);

	switch (optname) {
	case IP_OPTIONS:
	{
		/* Room for the header struct plus the 40-byte IP options max. */
		unsigned char optbuf[sizeof(struct ip_options)+40];
		struct ip_options *opt = (struct ip_options *)optbuf;
		struct ip_options_rcu *inet_opt;

		inet_opt = rcu_dereference_protected(inet->inet_opt,
						     lockdep_sock_is_held(sk));
		opt->optlen = 0;
		/* Snapshot under the socket lock, then release it before
		 * touching userspace memory (which can fault/sleep).
		 */
		if (inet_opt)
			memcpy(optbuf, &inet_opt->opt,
			       sizeof(struct ip_options) +
			       inet_opt->opt.optlen);
		release_sock(sk);

		if (opt->optlen == 0)
			return put_user(0, optlen);

		/* Convert back from the internal (processed) representation. */
		ip_options_undo(opt);

		len = min_t(unsigned int, len, opt->optlen);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, opt->__data, len))
			return -EFAULT;
		return 0;
	}
	case IP_PKTINFO:
		val = (inet->cmsg_flags & IP_CMSG_PKTINFO) != 0;
		break;
	case IP_RECVTTL:
		val = (inet->cmsg_flags & IP_CMSG_TTL) != 0;
		break;
	case IP_RECVTOS:
		val = (inet->cmsg_flags & IP_CMSG_TOS) != 0;
		break;
	case IP_RECVOPTS:
		val = (inet->cmsg_flags & IP_CMSG_RECVOPTS) != 0;
		break;
	case IP_RETOPTS:
		val = (inet->cmsg_flags & IP_CMSG_RETOPTS) != 0;
		break;
	case IP_PASSSEC:
		val = (inet->cmsg_flags & IP_CMSG_PASSSEC) != 0;
		break;
	case IP_RECVORIGDSTADDR:
		val = (inet->cmsg_flags & IP_CMSG_ORIGDSTADDR) != 0;
		break;
	case IP_CHECKSUM:
		val = (inet->cmsg_flags & IP_CMSG_CHECKSUM) != 0;
		break;
	case IP_RECVFRAGSIZE:
		val = (inet->cmsg_flags & IP_CMSG_RECVFRAGSIZE) != 0;
		break;
	case IP_TOS:
		val = inet->tos;
		break;
	case IP_TTL:
	{
		struct net *net = sock_net(sk);

		/* -1 means "not explicitly set": report the netns default. */
		val = (inet->uc_ttl == -1 ?
		       net->ipv4.sysctl_ip_default_ttl :
		       inet->uc_ttl);
		break;
	}
	case IP_HDRINCL:
		val = inet->hdrincl;
		break;
	case IP_NODEFRAG:
		val = inet->nodefrag;
		break;
	case IP_BIND_ADDRESS_NO_PORT:
		val = inet->bind_address_no_port;
		break;
	case IP_MTU_DISCOVER:
		val = inet->pmtudisc;
		break;
	case IP_MTU:
	{
		struct dst_entry *dst;
		val = 0;
		dst = sk_dst_get(sk);
		if (dst) {
			val = dst_mtu(dst);
			dst_release(dst);
		}
		/* No cached route means we cannot know the path MTU. */
		if (!val) {
			release_sock(sk);
			return -ENOTCONN;
		}
		break;
	}
	case IP_RECVERR:
		val = inet->recverr;
		break;
	case IP_MULTICAST_TTL:
		val = inet->mc_ttl;
		break;
	case IP_MULTICAST_LOOP:
		val = inet->mc_loop;
		break;
	case IP_UNICAST_IF:
		val = (__force int)htonl((__u32) inet->uc_index);
		break;
	case IP_MULTICAST_IF:
	{
		struct in_addr addr;
		len = min_t(unsigned int, len, sizeof(struct in_addr));
		addr.s_addr = inet->mc_addr;
		release_sock(sk);

		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &addr, len))
			return -EFAULT;
		return 0;
	}
	case IP_MSFILTER:
	{
		/* RTNL option: must leave via "out" to drop both locks. */
		struct ip_msfilter msf;

		if (len < IP_MSFILTER_SIZE(0)) {
			err = -EINVAL;
			goto out;
		}
		if (copy_from_user(&msf, optval, IP_MSFILTER_SIZE(0))) {
			err = -EFAULT;
			goto out;
		}
		err = ip_mc_msfget(sk, &msf,
				   (struct ip_msfilter __user *)optval, optlen);
		goto out;
	}
	case MCAST_MSFILTER:
	{
		/* RTNL option: must leave via "out" to drop both locks. */
		struct group_filter gsf;

		if (len < GROUP_FILTER_SIZE(0)) {
			err = -EINVAL;
			goto out;
		}
		if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) {
			err = -EFAULT;
			goto out;
		}
		err = ip_mc_gsfget(sk, &gsf,
				   (struct group_filter __user *)optval,
				   optlen);
		goto out;
	}
	case IP_MULTICAST_ALL:
		val = inet->mc_all;
		break;
	case IP_PKTOPTIONS:
	{
		struct msghdr msg;

		release_sock(sk);

		if (sk->sk_type != SOCK_STREAM)
			return -ENOPROTOOPT;

		/* Synthesize the cmsg stream a recvmsg() would produce. */
		msg.msg_control = (__force void *) optval;
		msg.msg_controllen = len;
		msg.msg_flags = flags;

		if (inet->cmsg_flags & IP_CMSG_PKTINFO) {
			struct in_pktinfo info;

			info.ipi_addr.s_addr = inet->inet_rcv_saddr;
			info.ipi_spec_dst.s_addr = inet->inet_rcv_saddr;
			info.ipi_ifindex = inet->mc_index;
			put_cmsg(&msg, SOL_IP, IP_PKTINFO, sizeof(info), &info);
		}
		if (inet->cmsg_flags & IP_CMSG_TTL) {
			int hlim = inet->mc_ttl;
			put_cmsg(&msg, SOL_IP, IP_TTL, sizeof(hlim), &hlim);
		}
		if (inet->cmsg_flags & IP_CMSG_TOS) {
			int tos = inet->rcv_tos;
			put_cmsg(&msg, SOL_IP, IP_TOS, sizeof(tos), &tos);
		}
		/* Report how many bytes of control data were written. */
		len -= msg.msg_controllen;
		return put_user(len, optlen);
	}
	case IP_FREEBIND:
		val = inet->freebind;
		break;
	case IP_TRANSPARENT:
		val = inet->transparent;
		break;
	case IP_MINTTL:
		val = inet->min_ttl;
		break;
	default:
		release_sock(sk);
		return -ENOPROTOOPT;
	}
	release_sock(sk);

	/* BSD compat: a short buffer (1-3 bytes) gets a single byte back
	 * when the value fits in an unsigned char; otherwise copy up to
	 * sizeof(int) bytes of the int value.
	 */
	if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) {
		unsigned char ucval = (unsigned char)val;
		len = 1;
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &ucval, 1))
			return -EFAULT;
	} else {
		len = min_t(unsigned int, sizeof(int), len);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &val, len))
			return -EFAULT;
	}
	return 0;

out:
	release_sock(sk);
	if (needs_rtnl)
		rtnl_unlock();
	return err;
}
/*
 * Public SOL_IP getsockopt() entry point.  Delegates to
 * do_ip_getsockopt(); options it does not recognize (-ENOPROTOOPT) are
 * offered to netfilter, excluding IP_PKTOPTIONS and the multicast
 * routing options, for which -ENOPROTOOPT is a real answer.
 */
int ip_getsockopt(struct sock *sk, int level,
		int optname, char __user *optval, int __user *optlen)
{
	int ret;

	ret = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (ret == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
	    !ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		lock_sock(sk);
		ret = nf_getsockopt(sk, PF_INET, optname, optval, &len);
		release_sock(sk);

		if (ret >= 0)
			ret = put_user(len, optlen);
		return ret;
	}
#endif
	return ret;
}
EXPORT_SYMBOL(ip_getsockopt);
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat entry point for SOL_IP getsockopt().  MCAST_MSFILTER has
 * an ABI-dependent layout and is translated by compat_mc_getsockopt()
 * (which calls back into the native ip_getsockopt()); everything else
 * uses the regular path with MSG_CMSG_COMPAT so IP_PKTOPTIONS emits
 * compat-layout cmsgs, plus a compat netfilter fallback.
 */
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
			 char __user *optval, int __user *optlen)
{
	int err;

	if (optname == MCAST_MSFILTER)
		return compat_mc_getsockopt(sk, level, optname, optval, optlen,
			ip_getsockopt);

	err = do_ip_getsockopt(sk, level, optname, optval, optlen,
		MSG_CMSG_COMPAT);

#ifdef CONFIG_NETFILTER
	/* we need to exclude all possible ENOPROTOOPTs except default case */
	if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
	    !ip_mroute_opt(optname)) {
		int len;

		if (get_user(len, optlen))
			return -EFAULT;

		lock_sock(sk);
		err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
		release_sock(sk);

		if (err >= 0)
			err = put_user(len, optlen);
		return err;
	}
#endif
	return err;
}
EXPORT_SYMBOL(compat_ip_getsockopt);
#endif
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_3154_0 |
crossvul-cpp_data_bad_4809_1 | /*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "git2.h"
#include "git2/odb_backend.h"
#include "smart.h"
#include "refs.h"
#include "repository.h"
#include "push.h"
#include "pack-objects.h"
#include "remote.h"
#include "util.h"
#define NETWORK_XFER_THRESHOLD (100*1024)
/* The minimal interval between progress updates (in seconds). */
#define MIN_PROGRESS_UPDATE_INTERVAL 0.5
/*
 * Read the remote's ref advertisement into t->refs, stopping after
 * `flushes` flush-pkts have been seen.  Returns the number of flushes
 * consumed on success, or a negative error code.
 *
 * Fixes vs. the previous version:
 *  - guard against git_pkt_parse_line() succeeding without producing a
 *    packet (zero-length keep-alive line) before dereferencing pkt;
 *  - free the packet (including the error string of a GIT_PKT_ERR, via
 *    git_pkt_free rather than git__free) on every exit path;
 *  - don't leak pkt when git_vector_insert() fails.
 */
int git_smart__store_refs(transport_smart *t, int flushes)
{
	gitno_buffer *buf = &t->buffer;
	git_vector *refs = &t->refs;
	int error, flush = 0, recvd;
	const char *line_end = NULL;
	git_pkt *pkt = NULL;
	size_t i;

	/* Clear existing refs in case git_remote_connect() is called again
	 * after git_remote_disconnect().
	 */
	git_vector_foreach(refs, i, pkt) {
		git_pkt_free(pkt);
	}
	git_vector_clear(refs);
	pkt = NULL;

	do {
		if (buf->offset > 0)
			error = git_pkt_parse_line(&pkt, buf->data, &line_end, buf->offset);
		else
			error = GIT_EBUFS;

		if (error < 0 && error != GIT_EBUFS)
			return error;

		if (error == GIT_EBUFS) {
			if ((recvd = gitno_recv(buf)) < 0)
				return recvd;

			if (recvd == 0 && !flush) {
				giterr_set(GITERR_NET, "early EOF");
				return GIT_EEOF;
			}

			continue;
		}

		gitno_consume(buf, line_end);

		/* An empty pkt-line parses successfully but yields no packet;
		 * skip it instead of dereferencing a NULL pointer. */
		if (pkt == NULL)
			continue;

		if (pkt->type == GIT_PKT_ERR) {
			giterr_set(GITERR_NET, "Remote error: %s", ((git_pkt_err *)pkt)->error);
			git_pkt_free(pkt);
			return -1;
		}

		if (pkt->type == GIT_PKT_FLUSH) {
			flush++;
			git_pkt_free(pkt);
		} else if (git_vector_insert(refs, pkt) < 0) {
			git_pkt_free(pkt);
			return -1;
		}
	} while (flush < flushes);

	return flush;
}
/*
 * Parse one "symref=<source>:<target>" capability entry starting at
 * `ptr`, append the resulting refspec mapping to `symrefs`, and advance
 * *out past the entry.  Returns 0 on success, -1 on a malformed entry,
 * or another negative error code.
 *
 * Fix vs. the previous version: `mapping` (and its parsed contents) was
 * leaked when git_vector_insert() failed.
 */
static int append_symref(const char **out, git_vector *symrefs, const char *ptr)
{
	int error;
	const char *end;
	git_buf buf = GIT_BUF_INIT;
	git_refspec *mapping = NULL;

	ptr += strlen(GIT_CAP_SYMREF);
	if (*ptr != '=')
		goto on_invalid;

	ptr++;
	/* the entry runs up to the next space, or to end-of-string */
	if (!(end = strchr(ptr, ' ')) &&
	    !(end = strchr(ptr, '\0')))
		goto on_invalid;

	if ((error = git_buf_put(&buf, ptr, end - ptr)) < 0)
		return error;

	/* symref mapping has refspec format */
	mapping = git__calloc(1, sizeof(git_refspec));
	GITERR_CHECK_ALLOC(mapping);

	error = git_refspec__parse(mapping, git_buf_cstr(&buf), true);
	git_buf_free(&buf);

	/* if the error isn't OOM, then it's a parse error; let's use a nicer message */
	if (error < 0) {
		if (giterr_last()->klass != GITERR_NOMEMORY)
			goto on_invalid;

		git__free(mapping);
		return error;
	}

	if ((error = git_vector_insert(symrefs, mapping)) < 0) {
		/* the vector did not take ownership: release the mapping */
		git_refspec__free(mapping);
		git__free(mapping);
		return error;
	}

	*out = end;
	return 0;

on_invalid:
	giterr_set(GITERR_NET, "remote sent invalid symref");
	git_refspec__free(mapping);
	git__free(mapping);
	return -1;
}
/*
 * Scan the space-separated capability list advertised on the first ref
 * packet and set the corresponding flags in `caps` for capabilities we
 * also implement.  "symref=..." entries are collected into `symrefs`.
 * Unknown capabilities are skipped.  Returns 0 on success or an error
 * from append_symref().
 */
int git_smart__detect_caps(git_pkt_ref *pkt, transport_smart_caps *caps, git_vector *symrefs)
{
	const char *ptr;

	/* No refs or capabilites, odd but not a problem */
	if (pkt == NULL || pkt->capabilities == NULL)
		return 0;

	ptr = pkt->capabilities;
	while (ptr != NULL && *ptr != '\0') {
		if (*ptr == ' ')
			ptr++;

		if (!git__prefixcmp(ptr, GIT_CAP_OFS_DELTA)) {
			caps->common = caps->ofs_delta = 1;
			ptr += strlen(GIT_CAP_OFS_DELTA);
			continue;
		}

		/* Keep multi_ack_detailed before multi_ack */
		if (!git__prefixcmp(ptr, GIT_CAP_MULTI_ACK_DETAILED)) {
			caps->common = caps->multi_ack_detailed = 1;
			ptr += strlen(GIT_CAP_MULTI_ACK_DETAILED);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_MULTI_ACK)) {
			caps->common = caps->multi_ack = 1;
			ptr += strlen(GIT_CAP_MULTI_ACK);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_INCLUDE_TAG)) {
			caps->common = caps->include_tag = 1;
			ptr += strlen(GIT_CAP_INCLUDE_TAG);
			continue;
		}

		/* Keep side-band check after side-band-64k */
		if (!git__prefixcmp(ptr, GIT_CAP_SIDE_BAND_64K)) {
			caps->common = caps->side_band_64k = 1;
			ptr += strlen(GIT_CAP_SIDE_BAND_64K);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_SIDE_BAND)) {
			caps->common = caps->side_band = 1;
			ptr += strlen(GIT_CAP_SIDE_BAND);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_DELETE_REFS)) {
			caps->common = caps->delete_refs = 1;
			ptr += strlen(GIT_CAP_DELETE_REFS);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_THIN_PACK)) {
			caps->common = caps->thin_pack = 1;
			ptr += strlen(GIT_CAP_THIN_PACK);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_SYMREF)) {
			int error;

			if ((error = append_symref(&ptr, symrefs, ptr)) < 0)
				return error;

			continue;
		}

		/* We don't know this capability, so skip it */
		ptr = strchr(ptr, ' ');
	}

	return 0;
}
/*
 * Read one pkt-line from the buffer, receiving more data from the
 * network as needed.  Returns the packet type (>= 0) on success and
 * hands ownership of the packet to *out (or frees it when out is NULL);
 * returns a negative error code on failure.
 *
 * Fixes vs. the previous version (CWE-476 / hang):
 *  - git_pkt_parse_line() can succeed without producing a packet (an
 *    empty keep-alive line); the old code then dereferenced a NULL pkt
 *    at `pkt->type`.  Such lines are now consumed and skipped.
 *  - gitno_recv() returning 0 (remote closed the connection) used to
 *    spin forever; it now reports GIT_EEOF.
 */
static int recv_pkt(git_pkt **out, gitno_buffer *buf)
{
	const char *ptr = buf->data, *line_end = ptr;
	git_pkt *pkt = NULL;
	int pkt_type, error = 0, ret;

	for (;;) {
		if (buf->offset > 0)
			error = git_pkt_parse_line(&pkt, ptr, &line_end, buf->offset);
		else
			error = GIT_EBUFS;

		if (error == 0) {
			if (pkt != NULL)
				break; /* return the pkt */

			/* empty keep-alive line: skip it and keep reading */
			gitno_consume(buf, line_end);
			continue;
		}

		if (error != GIT_EBUFS)
			return error;

		if ((ret = gitno_recv(buf)) < 0)
			return ret;

		if (ret == 0) {
			giterr_set(GITERR_NET, "early EOF");
			return GIT_EEOF;
		}
	}

	gitno_consume(buf, line_end);

	pkt_type = pkt->type;
	if (out != NULL)
		*out = pkt;
	else
		git__free(pkt);

	return pkt_type;
}
/*
 * Collect consecutive ACK packets from the server into t->common;
 * returns 0 as soon as a non-ACK packet arrives (that packet is
 * discarded), or a negative error code.
 *
 * Fix vs. the previous version: pkt was leaked when
 * git_vector_insert() failed.
 */
static int store_common(transport_smart *t)
{
	git_pkt *pkt = NULL;
	gitno_buffer *buf = &t->buffer;
	int error;

	do {
		if ((error = recv_pkt(&pkt, buf)) < 0)
			return error;

		if (pkt->type != GIT_PKT_ACK) {
			/* end of the ACK run; this packet is not kept */
			git__free(pkt);
			return 0;
		}

		if (git_vector_insert(&t->common, pkt) < 0) {
			git__free(pkt);
			return -1;
		}
	} while (1);

	return 0;
}
/*
 * Create a time-sorted revwalk seeded with every local (non-tag,
 * non-symbolic) ref of `repo`, for generating "have" lines during
 * negotiation.  On success stores the walk in *out and returns 0;
 * returns a negative error code otherwise.
 *
 * Fixes vs. the previous version:
 *  - `ref` was freed at on_error while uninitialized (first iteration)
 *    or stale (already freed on a previous iteration) — a wild/double
 *    free; it is now initialized to NULL and reset after each free;
 *  - the `refs` strarray leaked when git_revwalk_new() failed;
 *  - `ref` leaked on the symbolic-ref skip path.
 */
static int fetch_setup_walk(git_revwalk **out, git_repository *repo)
{
	git_revwalk *walk = NULL;
	git_strarray refs;
	unsigned int i;
	git_reference *ref = NULL;
	int error;

	if ((error = git_reference_list(&refs, repo)) < 0)
		return error;

	if ((error = git_revwalk_new(&walk, repo)) < 0)
		goto on_error;

	git_revwalk_sorting(walk, GIT_SORT_TIME);

	for (i = 0; i < refs.count; ++i) {
		/* No tags */
		if (!git__prefixcmp(refs.strings[i], GIT_REFS_TAGS_DIR))
			continue;

		if ((error = git_reference_lookup(&ref, repo, refs.strings[i])) < 0)
			goto on_error;

		/* symbolic refs carry no OID to push into the walk */
		if (git_reference_type(ref) != GIT_REF_SYMBOLIC &&
		    (error = git_revwalk_push(walk, git_reference_target(ref))) < 0)
			goto on_error;

		git_reference_free(ref);
		ref = NULL;
	}

	git_strarray_free(&refs);
	*out = walk;
	return 0;

on_error:
	git_revwalk_free(walk);
	git_reference_free(ref);
	git_strarray_free(&refs);
	return error;
}
/*
 * In multi_ack(_detailed) negotiation, drain packets until either a NAK
 * or a terminating ACK (one whose status is not "continue"/"common")
 * arrives.  Returns 0 when the ACK run is over, or a negative error
 * from recv_pkt().
 *
 * NOTE(review): assumes recv_pkt() never yields a NULL pkt on success —
 * verify against recv_pkt's contract.
 */
static int wait_while_ack(gitno_buffer *buf)
{
	int error;
	git_pkt_ack *pkt = NULL;

	while (1) {
		git__free(pkt); /* release the previous packet; no-op on first pass */

		if ((error = recv_pkt((git_pkt **)&pkt, buf)) < 0)
			return error;

		if (pkt->type == GIT_PKT_NAK)
			break;

		/* a non-intermediate ACK terminates the run */
		if (pkt->type == GIT_PKT_ACK &&
		    (pkt->status != GIT_ACK_CONTINUE &&
		     pkt->status != GIT_ACK_COMMON)) {
			git__free(pkt);
			return 0;
		}
	}

	git__free(pkt);
	return 0;
}
/*
 * Run the have/want negotiation with the remote: send the "want" list,
 * then stream up to 256 "have" lines (from a local revwalk), flushing
 * every 20 and reading the server's (N)ACK responses, until a common
 * ancestor is found or we give up; finally send "done" and consume the
 * server's closing ACK/NAK.  Returns 0/positive on success, negative
 * error code otherwise.
 */
int git_smart__negotiate_fetch(git_transport *transport, git_repository *repo, const git_remote_head * const *wants, size_t count)
{
	transport_smart *t = (transport_smart *)transport;
	gitno_buffer *buf = &t->buffer;
	git_buf data = GIT_BUF_INIT;
	git_revwalk *walk = NULL;
	int error = -1, pkt_type;
	unsigned int i;
	git_oid oid;

	if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
		return error;

	if ((error = fetch_setup_walk(&walk, repo)) < 0)
		goto on_error;

	/*
	 * Our support for ACK extensions is simply to parse them. On
	 * the first ACK we will accept that as enough common
	 * objects. We give up if we haven't found an answer in the
	 * first 256 we send.
	 */
	i = 0;
	while (i < 256) {
		error = git_revwalk_next(&oid, walk);

		if (error < 0) {
			if (GIT_ITEROVER == error)
				break;

			goto on_error;
		}

		git_pkt_buffer_have(&oid, &data);
		i++;
		/* Every 20 haves: flush the batch to the server and read back
		 * its ACK/NAK responses. */
		if (i % 20 == 0) {
			if (t->cancelled.val) {
				giterr_set(GITERR_NET, "The fetch was cancelled by the user");
				error = GIT_EUSER;
				goto on_error;
			}

			git_pkt_buffer_flush(&data);
			if (git_buf_oom(&data)) {
				error = -1;
				goto on_error;
			}

			if ((error = git_smart__negotiation_step(&t->parent, data.ptr, data.size)) < 0)
				goto on_error;

			git_buf_clear(&data);
			if (t->caps.multi_ack || t->caps.multi_ack_detailed) {
				if ((error = store_common(t)) < 0)
					goto on_error;
			} else {
				pkt_type = recv_pkt(NULL, buf);

				if (pkt_type == GIT_PKT_ACK) {
					break;
				} else if (pkt_type == GIT_PKT_NAK) {
					continue;
				} else if (pkt_type < 0) {
					/* recv_pkt returned an error */
					error = pkt_type;
					goto on_error;
				} else {
					giterr_set(GITERR_NET, "Unexpected pkt type");
					error = -1;
					goto on_error;
				}
			}
		}

		if (t->common.length > 0)
			break;

		/* Stateless-RPC (HTTP) transports must resend wants + known
		 * common objects at the start of each request. */
		if (i % 20 == 0 && t->rpc) {
			git_pkt_ack *pkt;
			unsigned int i;

			if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
				goto on_error;

			git_vector_foreach(&t->common, i, pkt) {
				if ((error = git_pkt_buffer_have(&pkt->oid, &data)) < 0)
					goto on_error;
			}

			if (git_buf_oom(&data)) {
				error = -1;
				goto on_error;
			}
		}
	}

	/* Tell the other end that we're done negotiating */
	if (t->rpc && t->common.length > 0) {
		git_pkt_ack *pkt;
		unsigned int i;

		if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
			goto on_error;

		git_vector_foreach(&t->common, i, pkt) {
			if ((error = git_pkt_buffer_have(&pkt->oid, &data)) < 0)
				goto on_error;
		}

		if (git_buf_oom(&data)) {
			error = -1;
			goto on_error;
		}
	}

	if ((error = git_pkt_buffer_done(&data)) < 0)
		goto on_error;

	if (t->cancelled.val) {
		giterr_set(GITERR_NET, "The fetch was cancelled by the user");
		error = GIT_EUSER;
		goto on_error;
	}
	if ((error = git_smart__negotiation_step(&t->parent, data.ptr, data.size)) < 0)
		goto on_error;

	git_buf_free(&data);
	git_revwalk_free(walk);

	/* Now let's eat up whatever the server gives us */
	if (!t->caps.multi_ack && !t->caps.multi_ack_detailed) {
		pkt_type = recv_pkt(NULL, buf);

		if (pkt_type < 0) {
			return pkt_type;
		} else if (pkt_type != GIT_PKT_ACK && pkt_type != GIT_PKT_NAK) {
			giterr_set(GITERR_NET, "Unexpected pkt type");
			return -1;
		}
	} else {
		error = wait_while_ack(buf);
	}

	return error;

on_error:
	git_revwalk_free(walk);
	git_buf_free(&data);
	return error;
}
/*
 * Consume the packfile when the remote does not support side-band:
 * everything on the wire is pack data, so feed the buffer straight to
 * the pack writer until the connection is drained, then commit.
 * Returns 0 on success, GIT_EUSER on cancellation, or -1/negative error.
 */
static int no_sideband(transport_smart *t, struct git_odb_writepack *writepack, gitno_buffer *buf, git_transfer_progress *stats)
{
	int received;

	for (;;) {
		if (t->cancelled.val) {
			giterr_set(GITERR_NET, "The fetch was cancelled by the user");
			return GIT_EUSER;
		}

		if (writepack->append(writepack, buf->data, buf->offset, stats) < 0)
			return -1;

		gitno_consume_n(buf, buf->offset);

		if ((received = gitno_recv(buf)) < 0)
			return received;

		if (received == 0)
			break; /* remote closed: the pack is complete */
	}

	return writepack->commit(writepack, stats) < 0 ? -1 : 0;
}
/* State threaded through network_packetsize(): accumulates received
 * byte counts and throttles user progress callbacks. */
struct network_packetsize_payload
{
	git_transfer_progress_cb callback; /* user progress callback */
	void *payload;                     /* opaque data handed to callback */
	git_transfer_progress *stats;      /* running transfer statistics */
	size_t last_fired_bytes;           /* received_bytes at the last callback */
};
/*
 * Per-read network callback: add `received` bytes to the transfer stats
 * and invoke the user's progress callback once at least
 * NETWORK_XFER_THRESHOLD bytes have arrived since the last report.
 * Returns GIT_EUSER if the user callback requests cancellation.
 */
static int network_packetsize(size_t received, void *payload)
{
	struct network_packetsize_payload *npp = (struct network_packetsize_payload*)payload;

	/* Accumulate bytes */
	npp->stats->received_bytes += received;

	/* Fire notification if the threshold is reached */
	if ((npp->stats->received_bytes - npp->last_fired_bytes) > NETWORK_XFER_THRESHOLD) {
		npp->last_fired_bytes = npp->stats->received_bytes;

		if (npp->callback(npp->stats, npp->payload))
			return GIT_EUSER;
	}

	return 0;
}
/*
 * Download the packfile announced by the server and index it into the
 * repository's ODB, demultiplexing side-band progress/data packets when
 * the remote supports side-band, and reporting transfer progress via
 * `transfer_progress_cb`.  Returns 0 on success, GIT_EUSER on
 * cancellation, or a negative error code.
 */
int git_smart__download_pack(
	git_transport *transport,
	git_repository *repo,
	git_transfer_progress *stats,
	git_transfer_progress_cb transfer_progress_cb,
	void *progress_payload)
{
	transport_smart *t = (transport_smart *)transport;
	gitno_buffer *buf = &t->buffer;
	git_odb *odb;
	struct git_odb_writepack *writepack = NULL;
	int error = 0;
	struct network_packetsize_payload npp = {0};

	memset(stats, 0, sizeof(git_transfer_progress));

	if (transfer_progress_cb) {
		npp.callback = transfer_progress_cb;
		npp.payload = progress_payload;
		npp.stats = stats;
		t->packetsize_cb = &network_packetsize;
		t->packetsize_payload = &npp;

		/* We might have something in the buffer already from negotiate_fetch */
		if (t->buffer.offset > 0 && !t->cancelled.val)
			if (t->packetsize_cb(t->buffer.offset, t->packetsize_payload))
				git_atomic_set(&t->cancelled, 1);
	}

	if ((error = git_repository_odb__weakptr(&odb, repo)) < 0 ||
		((error = git_odb_write_pack(&writepack, odb, transfer_progress_cb, progress_payload)) != 0))
		goto done;

	/*
	 * If the remote doesn't support the side-band, we can feed
	 * the data directly to the pack writer. Otherwise, we need to
	 * check which one belongs there.
	 */
	if (!t->caps.side_band && !t->caps.side_band_64k) {
		error = no_sideband(t, writepack, buf, stats);
		goto done;
	}

	do {
		git_pkt *pkt = NULL;

		/* Check cancellation before network call */
		if (t->cancelled.val) {
			giterr_clear();
			error = GIT_EUSER;
			goto done;
		}

		if ((error = recv_pkt(&pkt, buf)) >= 0) {
			/* Check cancellation after network call */
			if (t->cancelled.val) {
				giterr_clear();
				error = GIT_EUSER;
			} else if (pkt->type == GIT_PKT_PROGRESS) {
				/* side-band channel 2: textual progress from the server */
				if (t->progress_cb) {
					git_pkt_progress *p = (git_pkt_progress *) pkt;
					error = t->progress_cb(p->data, p->len, t->message_cb_payload);
				}
			} else if (pkt->type == GIT_PKT_DATA) {
				/* side-band channel 1: actual pack bytes */
				git_pkt_data *p = (git_pkt_data *) pkt;

				if (p->len)
					error = writepack->append(writepack, p->data, p->len, stats);
			} else if (pkt->type == GIT_PKT_FLUSH) {
				/* A flush indicates the end of the packfile */
				git__free(pkt);
				break;
			}
		}

		git__free(pkt);
		if (error < 0)
			goto done;

	} while (1);

	/*
	 * Trailing execution of transfer_progress_cb, if necessary...
	 * Only the callback through the npp datastructure currently
	 * updates the last_fired_bytes value. It is possible that
	 * progress has already been reported with the correct
	 * "received_bytes" value, but until (if?) this is unified
	 * then we will report progress again to be sure that the
	 * correct last received_bytes value is reported.
	 */
	if (npp.callback && npp.stats->received_bytes > npp.last_fired_bytes) {
		error = npp.callback(npp.stats, npp.payload);

		if (error != 0)
			goto done;
	}

	error = writepack->commit(writepack, stats);

done:
	if (writepack)
		writepack->free(writepack);
	if (transfer_progress_cb) {
		t->packetsize_cb = NULL;
		t->packetsize_payload = NULL;
	}

	return error;
}
/*
 * Serialize the push command list into pkt-line format: one
 * "<len><old-oid> <new-oid> <refname>" line per push spec, with the
 * capability list (report-status, side-band-64k) appended NUL-separated
 * to the first line, terminated by a flush-pkt ("0000").
 * Returns 0 on success or -1 on out-of-memory.
 */
static int gen_pktline(git_buf *buf, git_push *push)
{
	push_spec *spec;
	size_t i, len;
	char old_id[GIT_OID_HEXSZ+1], new_id[GIT_OID_HEXSZ+1];

	old_id[GIT_OID_HEXSZ] = '\0'; new_id[GIT_OID_HEXSZ] = '\0';

	git_vector_foreach(&push->specs, i, spec) {
		/* 4-byte length prefix is included in len via the constant 7:
		 * 4 (length) + 2 (spaces) + 1 (newline). */
		len = 2*GIT_OID_HEXSZ + 7 + strlen(spec->refspec.dst);

		if (i == 0) {
			++len; /* '\0' */
			if (push->report_status)
				len += strlen(GIT_CAP_REPORT_STATUS) + 1;
			len += strlen(GIT_CAP_SIDE_BAND_64K) + 1;
		}

		git_oid_fmt(old_id, &spec->roid);
		git_oid_fmt(new_id, &spec->loid);

		git_buf_printf(buf, "%04"PRIxZ"%s %s %s", len, old_id, new_id, spec->refspec.dst);

		if (i == 0) {
			git_buf_putc(buf, '\0');
			/* Core git always starts their capabilities string with a space */
			if (push->report_status) {
				git_buf_putc(buf, ' ');
				git_buf_printf(buf, GIT_CAP_REPORT_STATUS);
			}
			git_buf_putc(buf, ' ');
			git_buf_printf(buf, GIT_CAP_SIDE_BAND_64K);
		}

		git_buf_putc(buf, '\n');
	}

	git_buf_puts(buf, "0000");
	/* individual put errors are subsumed by the final OOM check */
	return git_buf_oom(buf) ? -1 : 0;
}
/*
 * Fold one report-status packet into the push state: "ok"/"ng" lines
 * become push_status entries, "unpack ..." sets unpack_ok, and a flush
 * ends the report (signalled by returning GIT_ITEROVER).  Any other
 * packet type is a protocol error (-1).
 */
static int add_push_report_pkt(git_push *push, git_pkt *pkt)
{
	push_status *status;

	switch (pkt->type) {
		case GIT_PKT_OK:
			status = git__calloc(1, sizeof(push_status));
			GITERR_CHECK_ALLOC(status);
			status->msg = NULL;
			status->ref = git__strdup(((git_pkt_ok *)pkt)->ref);
			if (!status->ref ||
				git_vector_insert(&push->status, status) < 0) {
				git_push_status_free(status);
				return -1;
			}
			break;
		case GIT_PKT_NG:
			status = git__calloc(1, sizeof(push_status));
			GITERR_CHECK_ALLOC(status);
			status->ref = git__strdup(((git_pkt_ng *)pkt)->ref);
			status->msg = git__strdup(((git_pkt_ng *)pkt)->msg);
			if (!status->ref || !status->msg ||
				git_vector_insert(&push->status, status) < 0) {
				git_push_status_free(status);
				return -1;
			}
			break;
		case GIT_PKT_UNPACK:
			push->unpack_ok = ((git_pkt_unpack *)pkt)->unpack_ok;
			break;
		case GIT_PKT_FLUSH:
			return GIT_ITEROVER;
		default:
			giterr_set(GITERR_NET, "report-status: protocol error");
			return -1;
	}

	return 0;
}
/*
 * Parse the report-status pkt-lines carried inside a side-band DATA
 * packet.  An inner pkt-line may be split across side-band packets, so
 * incomplete trailing data is stashed in `data_pkt_buf` and prepended
 * to the next call's input.  Returns 0 on success (including "need more
 * data"), GIT_ITEROVER via add_push_report_pkt() when the report ends,
 * or a negative error code.
 *
 * Fix vs. the previous version: `line`/`line_end` were uninitialized;
 * if the very first git_pkt_parse_line() call failed hard, the cleanup
 * path passed a garbage `line_end` to git_buf_consume().  Both are now
 * NULL-initialized and the consume is guarded.
 */
static int add_push_report_sideband_pkt(git_push *push, git_pkt_data *data_pkt, git_buf *data_pkt_buf)
{
	git_pkt *pkt;
	const char *line, *line_end = NULL;
	size_t line_len;
	int error;
	int reading_from_buf = data_pkt_buf->size > 0;

	if (reading_from_buf) {
		/* We had an existing partial packet, so add the new
		 * packet to the buffer and parse the whole thing */
		git_buf_put(data_pkt_buf, data_pkt->data, data_pkt->len);
		line = data_pkt_buf->ptr;
		line_len = data_pkt_buf->size;
	}
	else {
		line = data_pkt->data;
		line_len = data_pkt->len;
	}

	while (line_len > 0) {
		error = git_pkt_parse_line(&pkt, line, &line_end, line_len);

		if (error == GIT_EBUFS) {
			/* Buffer the data when the inner packet is split
			 * across multiple sideband packets */
			if (!reading_from_buf)
				git_buf_put(data_pkt_buf, line, line_len);
			error = 0;
			goto done;
		}
		else if (error < 0)
			goto done;

		/* Advance in the buffer */
		line_len -= (line_end - line);
		line = line_end;

		/* When a valid packet with no content has been
		 * read, git_pkt_parse_line does not report an
		 * error, but the pkt pointer has not been set.
		 * Handle this by skipping over empty packets.
		 */
		if (pkt == NULL)
			continue;

		error = add_push_report_pkt(push, pkt);

		git_pkt_free(pkt);

		if (error < 0 && error != GIT_ITEROVER)
			goto done;
	}

	error = 0;

done:
	if (reading_from_buf && line_end != NULL)
		git_buf_consume(data_pkt_buf, line_end);
	return error;
}
/*
 * Read the server's post-push report-status stream into `push`:
 * side-band DATA packets are unwrapped via
 * add_push_report_sideband_pkt(), PROGRESS packets are forwarded to the
 * user's sideband callback, ERR packets abort, and everything else is
 * folded in via add_push_report_pkt() until the terminating flush.
 * Returns 0 on success or a negative error code.
 */
static int parse_report(transport_smart *transport, git_push *push)
{
	git_pkt *pkt = NULL;
	const char *line_end = NULL;
	gitno_buffer *buf = &transport->buffer;
	int error, recvd;
	git_buf data_pkt_buf = GIT_BUF_INIT;

	for (;;) {
		if (buf->offset > 0)
			error = git_pkt_parse_line(&pkt, buf->data,
						   &line_end, buf->offset);
		else
			error = GIT_EBUFS;

		if (error < 0 && error != GIT_EBUFS) {
			error = -1;
			goto done;
		}

		/* not enough buffered yet: pull more bytes off the wire */
		if (error == GIT_EBUFS) {
			if ((recvd = gitno_recv(buf)) < 0) {
				error = recvd;
				goto done;
			}

			if (recvd == 0) {
				giterr_set(GITERR_NET, "early EOF");
				error = GIT_EEOF;
				goto done;
			}
			continue;
		}

		gitno_consume(buf, line_end);

		error = 0;

		/* empty pkt-line: parse succeeded but produced no packet */
		if (pkt == NULL)
			continue;

		switch (pkt->type) {
			case GIT_PKT_DATA:
				/* This is a sideband packet which contains other packets */
				error = add_push_report_sideband_pkt(push, (git_pkt_data *)pkt, &data_pkt_buf);
				break;
			case GIT_PKT_ERR:
				giterr_set(GITERR_NET, "report-status: Error reported: %s",
					((git_pkt_err *)pkt)->error);
				error = -1;
				break;
			case GIT_PKT_PROGRESS:
				if (transport->progress_cb) {
					git_pkt_progress *p = (git_pkt_progress *) pkt;
					error = transport->progress_cb(p->data, p->len, transport->message_cb_payload);
				}
				break;
			default:
				error = add_push_report_pkt(push, pkt);
				break;
		}

		git_pkt_free(pkt);

		/* add_push_report_pkt returns GIT_ITEROVER when it receives a flush */
		if (error == GIT_ITEROVER) {
			error = 0;
			if (data_pkt_buf.size > 0) {
				/* If there was data remaining in the pack data buffer,
				 * then the server sent a partial pkt-line */
				giterr_set(GITERR_NET, "Incomplete pack data pkt-line");
				error = GIT_ERROR;
			}
			goto done;
		}

		if (error < 0) {
			goto done;
		}
	}
done:
	git_buf_free(&data_pkt_buf);
	return error;
}
/*
 * Append a synthetic remote-ref packet for a newly pushed ref (one the
 * remote did not previously advertise) so the local view of the remote
 * refs stays in sync.  Returns 0 on success, -1 on allocation failure.
 */
static int add_ref_from_push_spec(git_vector *refs, push_spec *push_spec)
{
	git_pkt_ref *added = git__calloc(1, sizeof(git_pkt_ref));
	GITERR_CHECK_ALLOC(added);

	added->type = GIT_PKT_REF;
	git_oid_cpy(&added->head.oid, &push_spec->loid);
	added->head.name = git__strdup(push_spec->refspec.dst);

	if (!added->head.name ||
		git_vector_insert(refs, added) < 0) {
		git_pkt_free((git_pkt *)added);
		return -1;
	}

	return 0;
}
/*
 * Reconcile the locally cached remote refs (`refs`) with the outcome of
 * a push: verify the server's report matches the specs one-to-one, then
 * merge-join specs against refs — adding refs the push created,
 * updating OIDs for refs it changed, and dropping refs updated to the
 * zero OID (deletions).  Entries whose push_status carries a message
 * (i.e. rejected updates) are left untouched.  Returns 0 on success or
 * -1 on protocol error / allocation failure.
 */
static int update_refs_from_report(
	git_vector *refs,
	git_vector *push_specs,
	git_vector *push_report)
{
	git_pkt_ref *ref;
	push_spec *push_spec;
	push_status *push_status;
	size_t i, j, refs_len;
	int cmp;

	/* For each push spec we sent to the server, we should have
	 * gotten back a status packet in the push report */
	if (push_specs->length != push_report->length) {
		giterr_set(GITERR_NET, "report-status: protocol error");
		return -1;
	}

	/* We require that push_specs be sorted with push_spec_rref_cmp,
	 * and that push_report be sorted with push_status_ref_cmp */
	git_vector_sort(push_specs);
	git_vector_sort(push_report);

	git_vector_foreach(push_specs, i, push_spec) {
		push_status = git_vector_get(push_report, i);

		/* For each push spec we sent to the server, we should have
		 * gotten back a status packet in the push report which matches */
		if (strcmp(push_spec->refspec.dst, push_status->ref)) {
			giterr_set(GITERR_NET, "report-status: protocol error");
			return -1;
		}
	}

	/* We require that refs be sorted with ref_name_cmp */
	git_vector_sort(refs);
	i = j = 0;
	refs_len = refs->length;

	/* Merge join push_specs with refs */
	while (i < push_specs->length && j < refs_len) {
		push_spec = git_vector_get(push_specs, i);
		push_status = git_vector_get(push_report, i);
		ref = git_vector_get(refs, j);

		cmp = strcmp(push_spec->refspec.dst, ref->head.name);

		/* Iterate appropriately */
		if (cmp <= 0) i++;
		if (cmp >= 0) j++;

		/* Add case */
		if (cmp < 0 &&
			!push_status->msg &&
			add_ref_from_push_spec(refs, push_spec) < 0)
			return -1;

		/* Update case, delete case */
		if (cmp == 0 &&
			!push_status->msg)
			git_oid_cpy(&ref->head.oid, &push_spec->loid);
	}

	/* specs beyond the end of the cached ref list are all additions */
	for (; i < push_specs->length; i++) {
		push_spec = git_vector_get(push_specs, i);
		push_status = git_vector_get(push_report, i);

		/* Add case */
		if (!push_status->msg &&
			add_ref_from_push_spec(refs, push_spec) < 0)
			return -1;
	}

	/* Remove any refs which we updated to have a zero OID. */
	git_vector_rforeach(refs, i, ref) {
		if (git_oid_iszero(&ref->head.oid)) {
			git_vector_remove(refs, i);
			git_pkt_free((git_pkt *)ref);
		}
	}

	git_vector_sort(refs);

	return 0;
}
/* Context handed to the packbuilder's write callback (stream_thunk):
 * it couples the outgoing push stream with optional progress reporting. */
struct push_packbuilder_payload
{
	git_smart_subtransport_stream *stream; /* destination for pack data */
	git_packbuilder *pb;                   /* source packbuilder (object counts) */
	git_push_transfer_progress cb;         /* optional progress callback */
	void *cb_payload;                      /* opaque user data for cb */
	size_t last_bytes;                     /* total bytes written so far */
	double last_progress_report_time;      /* timestamp of last cb invocation */
};
/*
 * Packbuilder write callback: forward one chunk of pack data to the push
 * stream and, when a progress callback is registered, report the running
 * byte total at most once per MIN_PROGRESS_UPDATE_INTERVAL seconds.
 */
static int stream_thunk(void *buf, size_t size, void *data)
{
	struct push_packbuilder_payload *payload = data;
	double now;
	int error;

	error = payload->stream->write(payload->stream, (const char *)buf, size);
	if (error < 0)
		return error;

	if (!payload->cb)
		return 0;

	payload->last_bytes += size;
	now = git__timer();

	if (now - payload->last_progress_report_time >= MIN_PROGRESS_UPDATE_INTERVAL) {
		payload->last_progress_report_time = now;
		error = payload->cb(payload->pb->nr_written, payload->pb->nr_objects,
			payload->last_bytes, payload->cb_payload);
	}

	return error;
}
/*
 * Perform a push over the smart protocol: send the update commands
 * (pkt-lines), stream the packfile if one is needed, parse the server's
 * report-status response, and bring the cached remote refs up to date.
 * Returns 0 on success or a negative error code.
 */
int git_smart__push(git_transport *transport, git_push *push, const git_remote_callbacks *cbs)
{
	transport_smart *t = (transport_smart *)transport;
	struct push_packbuilder_payload packbuilder_payload = {0};
	git_buf pktline = GIT_BUF_INIT;
	int error = 0, need_pack = 0;
	push_spec *spec;
	unsigned int i;

	packbuilder_payload.pb = push->pb;

	/* Wire up progress reporting only if the caller asked for it. */
	if (cbs && cbs->push_transfer_progress) {
		packbuilder_payload.cb = cbs->push_transfer_progress;
		packbuilder_payload.cb_payload = cbs->payload;
	}

#ifdef PUSH_DEBUG
{
	/* Debug aid: dump the remote's refs and our push specs to stderr. */
	git_remote_head *head;
	char hex[GIT_OID_HEXSZ+1]; hex[GIT_OID_HEXSZ] = '\0';

	git_vector_foreach(&push->remote->refs, i, head) {
		git_oid_fmt(hex, &head->oid);
		fprintf(stderr, "%s (%s)\n", hex, head->name);
	}

	git_vector_foreach(&push->specs, i, spec) {
		git_oid_fmt(hex, &spec->roid);
		fprintf(stderr, "%s (%s) -> ", hex, spec->lref);
		git_oid_fmt(hex, &spec->loid);
		fprintf(stderr, "%s (%s)\n", hex, spec->rref ?
			spec->rref : spec->lref);
	}
}
#endif

	/*
	 * Figure out if we need to send a packfile; which is in all
	 * cases except when we only send delete commands
	 */
	git_vector_foreach(&push->specs, i, spec) {
		/* A non-empty source side means something is being uploaded. */
		if (spec->refspec.src && spec->refspec.src[0] != '\0') {
			need_pack = 1;
			break;
		}
	}

	/* Open the push stream, build the command pkt-lines, and send them. */
	if ((error = git_smart__get_push_stream(t, &packbuilder_payload.stream)) < 0 ||
		(error = gen_pktline(&pktline, push)) < 0 ||
		(error = packbuilder_payload.stream->write(packbuilder_payload.stream, git_buf_cstr(&pktline), git_buf_len(&pktline))) < 0)
		goto done;

	/* Stream the packfile via stream_thunk (also reports progress). */
	if (need_pack &&
		(error = git_packbuilder_foreach(push->pb, &stream_thunk, &packbuilder_payload)) < 0)
		goto done;

	/* If we sent nothing or the server doesn't support report-status, then
	 * we consider the pack to have been unpacked successfully */
	if (!push->specs.length || !push->report_status)
		push->unpack_ok = 1;
	else if ((error = parse_report(t, push)) < 0)
		goto done;

	/* If progress is being reported write the final report */
	if (cbs && cbs->push_transfer_progress) {
		error = cbs->push_transfer_progress(
			push->pb->nr_written,
			push->pb->nr_objects,
			packbuilder_payload.last_bytes,
			cbs->payload);
		if (error < 0)
			goto done;
	}

	/* Apply the server's per-ref statuses to our cached remote refs. */
	if (push->status.length) {
		error = update_refs_from_report(&t->refs, &push->specs, &push->status);
		if (error < 0)
			goto done;

		error = git_smart__update_heads(t, NULL);
	}

done:
	git_buf_free(&pktline);
	return error;
}
/*
* Copyright (c) 1999-2000 Image Power, Inc. and the University of
* British Columbia.
* Copyright (c) 2001-2003 Michael David Adams.
* All rights reserved.
*/
/* __START_OF_JASPER_LICENSE__
*
* JasPer License Version 2.0
*
* Copyright (c) 2001-2006 Michael David Adams
* Copyright (c) 1999-2000 Image Power, Inc.
* Copyright (c) 1999-2000 The University of British Columbia
*
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person (the
* "User") obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the
* following conditions:
*
* 1. The above copyright notices and this permission notice (which
* includes the disclaimer below) shall be included in all copies or
* substantial portions of the Software.
*
* 2. The name of a copyright holder shall not be used to endorse or
* promote products derived from the Software without specific prior
* written permission.
*
* THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS
* LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER
* THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
* "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
* INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
* FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
* WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE
* PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE
* THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY.
* EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS
* BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL
* PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS
* GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE
* ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE
* IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL
* SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES,
* AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL
* SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH
* THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH,
* PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH
* RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY
* EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES.
*
* __END_OF_JASPER_LICENSE__
*/
/*
* I/O Stream Library
*
* $Id$
*/
/******************************************************************************\
* Includes.
\******************************************************************************/
/* The configuration header file should be included first. */
#include "jasper/jas_config.h"
#include <assert.h>
#if defined(JAS_HAVE_FCNTL_H)
#include <fcntl.h>
#endif
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <ctype.h>
#if defined(JAS_HAVE_UNISTD_H)
#include <unistd.h>
#endif
#if defined(WIN32) || defined(JAS_HAVE_IO_H)
#include <io.h>
#endif
#include "jasper/jas_debug.h"
#include "jasper/jas_types.h"
#include "jasper/jas_stream.h"
#include "jasper/jas_malloc.h"
#include "jasper/jas_math.h"
/******************************************************************************\
* Local function prototypes.
\******************************************************************************/
static int jas_strtoopenmode(const char *s);
static void jas_stream_destroy(jas_stream_t *stream);
static jas_stream_t *jas_stream_create(void);
static void jas_stream_initbuf(jas_stream_t *stream, int bufmode, char *buf,
int bufsize);
static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt);
static int mem_write(jas_stream_obj_t *obj, char *buf, int cnt);
static long mem_seek(jas_stream_obj_t *obj, long offset, int origin);
static int mem_close(jas_stream_obj_t *obj);
static int sfile_read(jas_stream_obj_t *obj, char *buf, int cnt);
static int sfile_write(jas_stream_obj_t *obj, char *buf, int cnt);
static long sfile_seek(jas_stream_obj_t *obj, long offset, int origin);
static int sfile_close(jas_stream_obj_t *obj);
static int file_read(jas_stream_obj_t *obj, char *buf, int cnt);
static int file_write(jas_stream_obj_t *obj, char *buf, int cnt);
static long file_seek(jas_stream_obj_t *obj, long offset, int origin);
static int file_close(jas_stream_obj_t *obj);
/******************************************************************************\
* Local data.
\******************************************************************************/
/* Virtual I/O operation table for streams backed by a file descriptor. */
static jas_stream_ops_t jas_stream_fileops = {
	file_read,
	file_write,
	file_seek,
	file_close
};

/* Virtual I/O operation table for streams backed by a stdio FILE pointer. */
static jas_stream_ops_t jas_stream_sfileops = {
	sfile_read,
	sfile_write,
	sfile_seek,
	sfile_close
};

/* Virtual I/O operation table for streams backed by an in-memory buffer. */
static jas_stream_ops_t jas_stream_memops = {
	mem_read,
	mem_write,
	mem_seek,
	mem_close
};
/******************************************************************************\
* Code for opening and closing streams.
\******************************************************************************/
/* Allocate a stream object with all members in a known-empty state.
 * The caller must still install the ops table, the underlying object,
 * and the I/O buffer. Returns null on allocation failure. */
static jas_stream_t *jas_stream_create()
{
	jas_stream_t *stream;

	if (!(stream = jas_malloc(sizeof(jas_stream_t)))) {
		return 0;
	}
	stream->openmode_ = 0;
	stream->bufmode_ = 0;
	stream->flags_ = 0;
	stream->bufbase_ = 0;
	stream->bufstart_ = 0;
	stream->bufsize_ = 0;
	stream->ptr_ = 0;
	stream->cnt_ = 0;
	stream->ops_ = 0;
	stream->obj_ = 0;
	stream->rwcnt_ = 0;
	/* A negative limit means "no limit" on the read/write count. */
	stream->rwlimit_ = -1;

	return stream;
}
#if 0
/* Obsolete code. */
jas_stream_t *jas_stream_memopen(char *buf, int bufsize)
{
jas_stream_t *stream;
jas_stream_memobj_t *obj;
JAS_DBGLOG(100, ("jas_stream_memopen(%p, %d)\n", buf, bufsize));
if (!(stream = jas_stream_create())) {
return 0;
}
/* A stream associated with a memory buffer is always opened
for both reading and writing in binary mode. */
stream->openmode_ = JAS_STREAM_READ | JAS_STREAM_WRITE | JAS_STREAM_BINARY;
/* Since the stream data is already resident in memory, buffering
is not necessary. */
/* But... It still may be faster to use buffering anyways. */
jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0);
/* Select the operations for a memory stream. */
stream->ops_ = &jas_stream_memops;
/* Allocate memory for the underlying memory stream object. */
if (!(obj = jas_malloc(sizeof(jas_stream_memobj_t)))) {
jas_stream_destroy(stream);
return 0;
}
stream->obj_ = (void *) obj;
/* Initialize a few important members of the memory stream object. */
obj->myalloc_ = 0;
obj->buf_ = 0;
/* If the buffer size specified is nonpositive, then the buffer
is allocated internally and automatically grown as needed. */
if (bufsize <= 0) {
obj->bufsize_ = 1024;
obj->growable_ = 1;
} else {
obj->bufsize_ = bufsize;
obj->growable_ = 0;
}
if (buf) {
obj->buf_ = (unsigned char *) buf;
} else {
obj->buf_ = jas_malloc(obj->bufsize_);
obj->myalloc_ = 1;
}
if (!obj->buf_) {
jas_stream_close(stream);
return 0;
}
JAS_DBGLOG(100, ("jas_stream_memopen buffer buf=%p myalloc=%d\n",
obj->buf_, obj->myalloc_));
if (bufsize > 0 && buf) {
/* If a buffer was supplied by the caller and its length is positive,
make the associated buffer data appear in the stream initially. */
obj->len_ = bufsize;
} else {
/* The stream is initially empty. */
obj->len_ = 0;
}
obj->pos_ = 0;
return stream;
}
#else
/*
This function will eventually replace jas_stream_memopen.
If buf is 0 and bufsize > 0:
a buffer is dynamically allocated with size bufsize and this buffer is
not growable.
If buf is 0 and bufsize is 0:
a buffer is dynamically allocated whose size will automatically grow to
accommodate the amount of data written.
If buf is not 0:
bufsize (which, in this case, is not currently allowed to be zero) is
the size of the (nongrowable) buffer pointed to by buf.
*/
/* Open a read/write binary stream over a memory buffer.
 * See the preceding comment block for the buf/bufsize contract.
 * Returns null on failure. */
jas_stream_t *jas_stream_memopen2(char *buf, size_t bufsize)
{
	jas_stream_t *stream;
	jas_stream_memobj_t *obj;

	JAS_DBGLOG(100, ("jas_stream_memopen2(%p, %zu)\n", buf, bufsize));

	/* A caller-supplied buffer must have a positive size. */
	assert((buf && bufsize > 0) || (!buf));

	if (!(stream = jas_stream_create())) {
		return 0;
	}

	/* A stream associated with a memory buffer is always opened
	for both reading and writing in binary mode. */
	stream->openmode_ = JAS_STREAM_READ | JAS_STREAM_WRITE | JAS_STREAM_BINARY;

	/* Since the stream data is already resident in memory, buffering
	is not necessary. */
	/* But... It still may be faster to use buffering anyways. */
	jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0);

	/* Select the operations for a memory stream. */
	stream->ops_ = &jas_stream_memops;

	/* Allocate memory for the underlying memory stream object. */
	if (!(obj = jas_malloc(sizeof(jas_stream_memobj_t)))) {
		jas_stream_destroy(stream);
		return 0;
	}
	stream->obj_ = (void *) obj;

	/* Initialize a few important members of the memory stream object. */
	obj->myalloc_ = 0;
	obj->buf_ = 0;

	/* If the buffer size specified is nonpositive, then the buffer
	is allocated internally and automatically grown as needed. */
	if (!bufsize) {
		obj->bufsize_ = 1024;
		obj->growable_ = 1;
	} else {
		obj->bufsize_ = bufsize;
		obj->growable_ = 0;
	}
	if (buf) {
		obj->buf_ = JAS_CAST(unsigned char *, buf);
	} else {
		obj->buf_ = jas_malloc(obj->bufsize_);
		/* Mark the buffer as owned by the stream so mem_close frees it. */
		obj->myalloc_ = 1;
	}
	if (!obj->buf_) {
		/* Internal allocation failed; jas_stream_close releases obj too. */
		jas_stream_close(stream);
		return 0;
	}
	JAS_DBGLOG(100, ("jas_stream_memopen2 buffer buf=%p myalloc=%d\n",
	  obj->buf_, obj->myalloc_));

	if (bufsize > 0 && buf) {
		/* If a buffer was supplied by the caller and its length is positive,
		  make the associated buffer data appear in the stream initially. */
		obj->len_ = bufsize;
	} else {
		/* The stream is initially empty. */
		obj->len_ = 0;
	}
	obj->pos_ = 0;
	
	return stream;
}
/*
NOTE:
The version of the function jas_stream_memopen only exists for backwards
compatibility.
Eventually, it should be replaced by jas_stream_memopen2.
In retrospect, it was a very poor choice to have specified the buffer
size parameter (bufsize) to have type int. On some machines, int may only
be a 16-bit integer. This precludes larger-sized buffer allocations, which
are needed in practice.
If bufsize <= 0, the buffer is growable; otherwise, the buffer has a fixed
size of bufsize.
If buf is 0, the buffer is dynamically allocated with jas_malloc.
If buf is not 0 and bufsize <= 0 (which is not permitted in any
circumstances), bad things will happen (especially if the buf was not
allocated with jas_malloc).
*/
/* Backward-compatible wrapper around jas_stream_memopen2.
 * Warns about deprecated/invalid usages (negative size, user buffer with
 * nonpositive size) and then normalizes the arguments: a nonpositive size
 * yields an internally allocated, growable buffer. */
jas_stream_t *jas_stream_memopen(char *buf, int bufsize)
{
	char *new_buf;
	size_t new_bufsize;

	JAS_DBGLOG(100, ("jas_stream_memopen(%p, %d)\n", buf, bufsize));
	if (bufsize < 0) {
		jas_deprecated("negative buffer size for jas_stream_memopen");
	}
	if (buf && bufsize <= 0) {
		// This was never a valid thing to do with the old API.
		jas_eprintf("Invalid use of jas_stream_memopen detected.\n");
		jas_deprecated("A user-provided buffer for "
		  "jas_stream_memopen cannot be growable.\n");
	}
	if (bufsize <= 0) {
		/* Discard the caller's buffer (if any): growable streams must
		   own their storage. */
		new_bufsize = 0;
		new_buf = 0;
	} else {
		new_bufsize = bufsize;
		new_buf = buf;
	}
	return jas_stream_memopen2(new_buf, new_bufsize);
}
#endif
/* Open a file as a stream, using an fopen-style mode string
 * (e.g. "rb", "w+"). Returns null on failure. */
jas_stream_t *jas_stream_fopen(const char *filename, const char *mode)
{
	jas_stream_t *stream;
	jas_stream_fileobj_t *obj;
	int openflags;

	JAS_DBGLOG(100, ("jas_stream_fopen(\"%s\", \"%s\")\n", filename, mode));

	/* Allocate a stream object. */
	if (!(stream = jas_stream_create())) {
		return 0;
	}

	/* Parse the mode string. */
	stream->openmode_ = jas_strtoopenmode(mode);

	/* Determine the correct flags to use for opening the file. */
	if ((stream->openmode_ & JAS_STREAM_READ) &&
	  (stream->openmode_ & JAS_STREAM_WRITE)) {
		openflags = O_RDWR;
	} else if (stream->openmode_ & JAS_STREAM_READ) {
		openflags = O_RDONLY;
	} else if (stream->openmode_ & JAS_STREAM_WRITE) {
		openflags = O_WRONLY;
	} else {
		openflags = 0;
	}
	if (stream->openmode_ & JAS_STREAM_APPEND) {
		openflags |= O_APPEND;
	}
	if (stream->openmode_ & JAS_STREAM_BINARY) {
		openflags |= O_BINARY;
	}
	if (stream->openmode_ & JAS_STREAM_CREATE) {
		openflags |= O_CREAT | O_TRUNC;
	}

	/* Allocate space for the underlying file stream object. */
	if (!(obj = jas_malloc(sizeof(jas_stream_fileobj_t)))) {
		jas_stream_destroy(stream);
		return 0;
	}
	obj->fd = -1;
	obj->flags = 0;
	obj->pathname[0] = '\0';
	stream->obj_ = (void *) obj;

	/* Select the operations for a file stream object. */
	stream->ops_ = &jas_stream_fileops;

	/* Open the underlying file. */
	if ((obj->fd = open(filename, openflags, JAS_STREAM_PERMS)) < 0) {
		// Free the underlying file object, since it will not otherwise
		// be freed.
		jas_free(obj);
		jas_stream_destroy(stream);
		return 0;
	}

	/* By default, use full buffering for this type of stream. */
	jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0);

	return stream;
}
/* Wrap an already-open stdio FILE pointer as a stream.
 * The mode string only records the logical open mode (openmode_); the
 * FILE pointer is adopted as-is, so no open(2) flags are needed here.
 * Returns null on failure. */
jas_stream_t *jas_stream_freopen(const char *path, const char *mode, FILE *fp)
{
	jas_stream_t *stream;

	JAS_DBGLOG(100, ("jas_stream_freopen(\"%s\", \"%s\", %p)\n", path, mode,
	  fp));

	/* The path is accepted only for API symmetry with freopen(); it is
	   otherwise unused. */
	(void) path;

	/* Allocate a stream object. */
	if (!(stream = jas_stream_create())) {
		return 0;
	}

	/* Parse the mode string. */
	stream->openmode_ = jas_strtoopenmode(mode);

	stream->obj_ = JAS_CAST(void *, fp);

	/* Select the operations for a stdio file stream object. */
	stream->ops_ = &jas_stream_sfileops;

	/* By default, use full buffering for this type of stream. */
	jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0);

	return stream;
}
/* Create a stream backed by a temporary file, opened read/write in
 * binary mode. The file is unlinked immediately (or flagged for deletion
 * on close where immediate unlink fails). Returns null on failure. */
jas_stream_t *jas_stream_tmpfile()
{
	jas_stream_t *stream;
	jas_stream_fileobj_t *obj;

	JAS_DBGLOG(100, ("jas_stream_tmpfile()\n"));

	if (!(stream = jas_stream_create())) {
		return 0;
	}

	/* A temporary file stream is always opened for both reading and
	writing in binary mode. */
	stream->openmode_ = JAS_STREAM_READ | JAS_STREAM_WRITE | JAS_STREAM_BINARY;

	/* Allocate memory for the underlying temporary file object. */
	if (!(obj = jas_malloc(sizeof(jas_stream_fileobj_t)))) {
		jas_stream_destroy(stream);
		return 0;
	}
	obj->fd = -1;
	obj->flags = 0;
	obj->pathname[0] = '\0';
	stream->obj_ = obj;

	/* Choose a file name. */
	/* NOTE(review): tmpnam is inherently racy between name generation and
	   open; O_CREAT|O_EXCL below makes the open fail rather than follow a
	   planted file, but a mkstemp-style API would be preferable — confirm
	   platform constraints. */
	tmpnam(obj->pathname);

	/* Open the underlying file. */
	if ((obj->fd = open(obj->pathname, O_CREAT | O_EXCL | O_RDWR | O_TRUNC | O_BINARY,
	  JAS_STREAM_PERMS)) < 0) {
		jas_stream_destroy(stream);
		return 0;
	}

	/* Unlink the file so that it will disappear if the program
	terminates abnormally. */
	/* Under UNIX, one can unlink an open file and continue to do I/O
	on it.  Not all operating systems support this functionality, however.
	For example, under Microsoft Windows the unlink operation will fail,
	since the file is open. */
	if (unlink(obj->pathname)) {
		/* We will try unlinking the file again after it is closed. */
		obj->flags |= JAS_STREAM_FILEOBJ_DELONCLOSE;
	}

	/* Use full buffering. */
	jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0);

	stream->ops_ = &jas_stream_fileops;

	return stream;
}
/* Wrap an existing file descriptor as a stream. The descriptor is NOT
 * closed when the stream is closed (JAS_STREAM_FILEOBJ_NOCLOSE).
 * Returns null on failure. */
jas_stream_t *jas_stream_fdopen(int fd, const char *mode)
{
	jas_stream_t *stream;
	jas_stream_fileobj_t *obj;

	JAS_DBGLOG(100, ("jas_stream_fdopen(%d, \"%s\")\n", fd, mode));

	/* Allocate a stream object. */
	if (!(stream = jas_stream_create())) {
		return 0;
	}

	/* Parse the mode string. */
	stream->openmode_ = jas_strtoopenmode(mode);

#if defined(WIN32)
	/* Argh!!!  Someone ought to banish text mode (i.e., O_TEXT) to the
	greatest depths of purgatory! */
	/* Ensure that the file descriptor is in binary mode, if the caller
	has specified the binary mode flag.  Arguably, the caller ought to
	take care of this, but text mode is a ugly wart anyways, so we save
	the caller some grief by handling this within the stream library. */
	/* This ugliness is mainly for the benefit of those who run the
	JasPer software under Windows from shells that insist on opening
	files in text mode.  For example, in the Cygwin environment,
	shells often open files in text mode when I/O redirection is
	used.  Grr... */
	if (stream->openmode_ & JAS_STREAM_BINARY) {
		setmode(fd, O_BINARY);
	}
#endif

	/* Allocate space for the underlying file stream object. */
	if (!(obj = jas_malloc(sizeof(jas_stream_fileobj_t)))) {
		jas_stream_destroy(stream);
		return 0;
	}
	obj->fd = fd;
	obj->flags = 0;
	obj->pathname[0] = '\0';
	stream->obj_ = (void *) obj;

	/* Do not close the underlying file descriptor when the stream is
	closed. */
	obj->flags |= JAS_STREAM_FILEOBJ_NOCLOSE;

	/* By default, use full buffering for this type of stream. */
	jas_stream_initbuf(stream, JAS_STREAM_FULLBUF, 0, 0);

	/* Select the operations for a file stream object. */
	stream->ops_ = &jas_stream_fileops;

	return stream;
}
/* Release a stream object and, when owned by the library, its I/O
 * buffer. Does not touch the underlying stream object (obj_); that is
 * the close operation's job. */
static void jas_stream_destroy(jas_stream_t *stream)
{
	JAS_DBGLOG(100, ("jas_stream_destroy(%p)\n", stream));

	/* Free the I/O buffer only if this library allocated it. */
	if (stream->bufbase_ && (stream->bufmode_ & JAS_STREAM_FREEBUF)) {
		JAS_DBGLOG(100, ("jas_stream_destroy freeing buffer %p\n",
		  stream->bufbase_));
		jas_free(stream->bufbase_);
		stream->bufbase_ = 0;
	}

	jas_free(stream);
}
/* Close a stream: flush pending output, close the underlying object via
 * its ops table, and free the stream itself. Always returns 0. */
int jas_stream_close(jas_stream_t *stream)
{
	JAS_DBGLOG(100, ("jas_stream_close(%p)\n", stream));

	/* Flush buffer if necessary. */
	jas_stream_flush(stream);

	/* Close the underlying stream object. */
	(*stream->ops_->close_)(stream->obj_);

	jas_stream_destroy(stream);

	return 0;
}
/******************************************************************************\
* Code for reading and writing streams.
\******************************************************************************/
/* Out-of-line version of the getc macro (for callers that need a real
 * function). Returns the next character or EOF. */
int jas_stream_getc_func(jas_stream_t *stream)
{
	/* The read pointer must lie within the buffer plus putback area. */
	assert(stream->ptr_ - stream->bufbase_ <= stream->bufsize_ +
	  JAS_STREAM_MAXPUTBACK);
	return jas_stream_getc_macro(stream);
}
/* Out-of-line version of the putc macro. Returns the character written
 * or EOF on error. */
int jas_stream_putc_func(jas_stream_t *stream, int c)
{
	/* The write pointer must lie within the buffer proper. */
	assert(stream->ptr_ - stream->bufstart_ <= stream->bufsize_);
	return jas_stream_putc_macro(stream, c);
}
/* Push one character back onto the stream, to be returned by the next
 * read. Fails (-1) when no putback space remains in the buffer.
 * Note: the pushed-back character need not match what was read. */
int jas_stream_ungetc(jas_stream_t *stream, int c)
{
	if (!stream->ptr_ || stream->ptr_ == stream->bufbase_) {
		return -1;
	}

	/* Reset the EOF indicator (since we now have at least one character
	to read). */
	stream->flags_ &= ~JAS_STREAM_EOF;

	--stream->rwcnt_;
	--stream->ptr_;
	++stream->cnt_;
	*stream->ptr_ = c;
	return 0;
}
/* FIXME integral type */
/* FIXME integral type */
/* Read up to cnt characters into buf. Returns the number of characters
 * actually read, which is short of cnt only on EOF or error. */
int jas_stream_read(jas_stream_t *stream, void *buf, int cnt)
{
	char *dst;
	int nread;
	int ch;

	JAS_DBGLOG(100, ("jas_stream_read(%p, %p, %d)\n", stream, buf, cnt));

	if (cnt < 0) {
		jas_deprecated("negative count for jas_stream_read");
	}

	dst = buf;
	for (nread = 0; nread < cnt; ++nread) {
		if ((ch = jas_stream_getc(stream)) == EOF) {
			break;
		}
		dst[nread] = ch;
	}
	return nread;
}
/* FIXME integral type */
/* FIXME integral type */
/* Write up to cnt characters from buf. Returns the number of characters
 * actually written, which is short of cnt only on error. */
int jas_stream_write(jas_stream_t *stream, const void *buf, int cnt)
{
	const char *src;
	int nwritten;

	JAS_DBGLOG(100, ("jas_stream_write(%p, %p, %d)\n", stream, buf, cnt));

	if (cnt < 0) {
		jas_deprecated("negative count for jas_stream_write");
	}

	src = buf;
	for (nwritten = 0; nwritten < cnt; ++nwritten) {
		if (jas_stream_putc(stream, src[nwritten]) == EOF) {
			break;
		}
	}
	return nwritten;
}
/* Note: This function uses a fixed size buffer. Therefore, it cannot
handle invocations that will produce more output than can be held
by the buffer. */
/* Note: This function uses a fixed size buffer.  Therefore, it cannot
handle invocations that will produce more output than can be held
by the buffer. */
/* printf-style formatted output to a stream. Output longer than the
 * internal 4096-byte buffer is truncated by vsnprintf; the return value
 * is vsnprintf's result (the would-be length), not the bytes written. */
int jas_stream_printf(jas_stream_t *stream, const char *fmt, ...)
{
	va_list ap;
	char buf[4096];
	int ret;

	va_start(ap, fmt);
	ret = vsnprintf(buf, sizeof buf, fmt, ap);
	jas_stream_puts(stream, buf);
	va_end(ap);
	return ret;
}
/* Write a null-terminated string to a stream (terminator excluded).
 * Returns 0 on success, -1 on write error. */
int jas_stream_puts(jas_stream_t *stream, const char *s)
{
	for (; *s != '\0'; ++s) {
		if (jas_stream_putc_macro(stream, *s) == EOF) {
			return -1;
		}
	}
	return 0;
}
/* FIXME integral type */
/* FIXME integral type */
/* Read a line (up to and including a newline) into buf, writing at most
 * bufsize-1 characters plus a null terminator. Returns buf. */
char *jas_stream_gets(jas_stream_t *stream, char *buf, int bufsize)
{
	int c;
	char *bufptr;
	assert(bufsize > 0);

	JAS_DBGLOG(100, ("jas_stream_gets(%p, %p, %d)\n", stream, buf, bufsize));

	bufptr = buf;
	while (bufsize > 1) {
		if ((c = jas_stream_getc(stream)) == EOF) {
			break;
		}
		*bufptr++ = c;
		--bufsize;
		if (c == '\n') {
			break;
		}
	}
	*bufptr = '\0';
	return buf;
}
/* FIXME integral type */
/* FIXME integral type */
/* Read and discard n characters. Returns the number actually discarded
 * (short of n only on EOF or error). */
int jas_stream_gobble(jas_stream_t *stream, int n)
{
	int remaining;

	JAS_DBGLOG(100, ("jas_stream_gobble(%p, %d)\n", stream, n));

	if (n < 0) {
		jas_deprecated("negative count for jas_stream_gobble");
	}

	for (remaining = n; remaining > 0; --remaining) {
		if (jas_stream_getc(stream) == EOF) {
			return n - remaining;
		}
	}
	return n;
}
/* FIXME integral type */
/* FIXME integral type */
/* Write the character c to the stream n times. Returns the number of
 * characters actually written (short of n only on error). */
int jas_stream_pad(jas_stream_t *stream, int n, int c)
{
	int remaining;

	JAS_DBGLOG(100, ("jas_stream_pad(%p, %d, %d)\n", stream, n, c));

	if (n < 0) {
		jas_deprecated("negative count for jas_stream_pad");
	}

	for (remaining = n; remaining > 0; --remaining) {
		if (jas_stream_putc(stream, c) == EOF) {
			return n - remaining;
		}
	}
	return n;
}
/******************************************************************************\
* Code for getting and setting the stream position.
\******************************************************************************/
/* Report whether a stream supports seeking. Memory streams always do;
 * fd-backed file streams do if a no-op seek succeeds; all other stream
 * kinds are treated as non-seekable. */
int jas_stream_isseekable(jas_stream_t *stream)
{
	if (stream->ops_ == &jas_stream_memops) {
		return 1;
	}
	if (stream->ops_ != &jas_stream_fileops) {
		return 0;
	}
	/* Probe the descriptor with a zero-displacement seek. */
	return ((*stream->ops_->seek_)(stream->obj_, 0, SEEK_CUR) >= 0) ? 1 : 0;
}
/* Reposition a stream to its beginning. Returns the result of the
 * underlying seek (negative on failure). */
int jas_stream_rewind(jas_stream_t *stream)
{
	JAS_DBGLOG(100, ("jas_stream_rewind(%p)\n", stream));
	return jas_stream_seek(stream, 0, SEEK_SET);
}
/* Reposition a stream (lseek-style origin). Adjusts for buffered-but-
 * unconsumed input, flushes buffered output, then seeks the underlying
 * object. Returns the new position or -1 on failure. */
long jas_stream_seek(jas_stream_t *stream, long offset, int origin)
{
	long newpos;

	JAS_DBGLOG(100, ("jas_stream_seek(%p, %ld, %d)\n", stream, offset,
	  origin));

	/* The buffer cannot be in use for both reading and writing. */
	assert(!((stream->bufmode_ & JAS_STREAM_RDBUF) && (stream->bufmode_ &
	  JAS_STREAM_WRBUF)));

	/* Reset the EOF indicator (since we may not be at the EOF anymore). */
	stream->flags_ &= ~JAS_STREAM_EOF;

	if (stream->bufmode_ & JAS_STREAM_RDBUF) {
		if (origin == SEEK_CUR) {
			/* The underlying object is ahead of the logical position
			   by the number of unconsumed buffered characters. */
			offset -= stream->cnt_;
		}
	} else if (stream->bufmode_ & JAS_STREAM_WRBUF) {
		if (jas_stream_flush(stream)) {
			return -1;
		}
	}
	/* Discard the buffer contents; they no longer match the position. */
	stream->cnt_ = 0;
	stream->ptr_ = stream->bufstart_;
	stream->bufmode_ &= ~(JAS_STREAM_RDBUF | JAS_STREAM_WRBUF);

	if ((newpos = (*stream->ops_->seek_)(stream->obj_, offset, origin))
	  < 0) {
		return -1;
	}

	return newpos;
}
/* Report the current logical stream position: the underlying object's
 * position corrected for buffered (unconsumed or unflushed) data.
 * Returns -1 on failure. */
long jas_stream_tell(jas_stream_t *stream)
{
	int adjust;
	int offset;

	JAS_DBGLOG(100, ("jas_stream_tell(%p)\n", stream));

	if (stream->bufmode_ & JAS_STREAM_RDBUF) {
		/* Unconsumed input: logical position is behind the object's. */
		adjust = -stream->cnt_;
	} else if (stream->bufmode_ & JAS_STREAM_WRBUF) {
		/* Unflushed output: logical position is ahead of the object's. */
		adjust = stream->ptr_ - stream->bufstart_;
	} else {
		adjust = 0;
	}

	if ((offset = (*stream->ops_->seek_)(stream->obj_, 0, SEEK_CUR)) < 0) {
		return -1;
	}

	return offset + adjust;
}
/******************************************************************************\
* Buffer initialization code.
\******************************************************************************/
/* FIXME integral type */
/* Install the I/O buffer for a freshly created stream.
 * bufmode selects unbuffered/line/full buffering; buf/bufsize optionally
 * supply caller-owned storage (which must exceed the putback reserve).
 * On allocation failure, silently degrades to a one-character buffer. */
static void jas_stream_initbuf(jas_stream_t *stream, int bufmode, char *buf,
  int bufsize)
{
	/* If this function is being called, the buffer should not have been
	initialized yet. */
	assert(!stream->bufbase_);

	if (bufmode != JAS_STREAM_UNBUF) {
		/* The full- or line-buffered mode is being employed. */
		if (!buf) {
			/* The caller has not specified a buffer to employ, so allocate
			one. */
			if ((stream->bufbase_ = jas_malloc(JAS_STREAM_BUFSIZE +
			  JAS_STREAM_MAXPUTBACK))) {
				stream->bufmode_ |= JAS_STREAM_FREEBUF;
				stream->bufsize_ = JAS_STREAM_BUFSIZE;
			} else {
				/* The buffer allocation has failed.  Resort to unbuffered
				operation. */
				stream->bufbase_ = stream->tinybuf_;
				stream->bufsize_ = 1;
			}
		} else {
			/* The caller has specified a buffer to employ. */
			/* The buffer must be large enough to accommodate maximum
			putback. */
			assert(bufsize > JAS_STREAM_MAXPUTBACK);
			stream->bufbase_ = JAS_CAST(jas_uchar *, buf);
			stream->bufsize_ = bufsize - JAS_STREAM_MAXPUTBACK;
		}
	} else {
		/* The unbuffered mode is being employed. */
		/* A buffer should not have been supplied by the caller. */
		assert(!buf);
		/* Use a trivial one-character buffer. */
		stream->bufbase_ = stream->tinybuf_;
		stream->bufsize_ = 1;
	}
	/* The usable buffer begins after the putback reserve. */
	stream->bufstart_ = &stream->bufbase_[JAS_STREAM_MAXPUTBACK];
	stream->ptr_ = stream->bufstart_;
	stream->cnt_ = 0;
	stream->bufmode_ |= bufmode & JAS_STREAM_BUFMODEMASK;
}
/******************************************************************************\
* Buffer filling and flushing code.
\******************************************************************************/
/* Flush any buffered output to the underlying stream object.
 * A buffer used for reading holds no pending output, so it is a no-op.
 * Returns 0 on success, EOF on error. */
int jas_stream_flush(jas_stream_t *stream)
{
	return (stream->bufmode_ & JAS_STREAM_RDBUF) ? 0 :
	  jas_stream_flushbuf(stream, EOF);
}
/* Refill the stream's read buffer from the underlying object.
 * If getflag is nonzero, consume and return the first character;
 * otherwise peek at it. Returns the character, or EOF on end-of-stream
 * or error (with the corresponding stream flag set). */
int jas_stream_fillbuf(jas_stream_t *stream, int getflag)
{
	int c;

	/* The stream must not be in an error or EOF state. */
	if ((stream->flags_ & (JAS_STREAM_ERRMASK)) != 0) {
		return EOF;
	}

	/* The stream must be open for reading. */
	if ((stream->openmode_ & JAS_STREAM_READ) == 0) {
		return EOF;
	}

	/* Make a half-hearted attempt to confirm that the buffer is not
	currently being used for writing.  This check is not intended
	to be foolproof! */
	assert((stream->bufmode_ & JAS_STREAM_WRBUF) == 0);

	assert(stream->ptr_ - stream->bufstart_ <= stream->bufsize_);

	/* Mark the buffer as being used for reading. */
	stream->bufmode_ |= JAS_STREAM_RDBUF;

	/* Read new data into the buffer. */
	stream->ptr_ = stream->bufstart_;
	if ((stream->cnt_ = (*stream->ops_->read_)(stream->obj_,
	  (char *) stream->bufstart_, stream->bufsize_)) <= 0) {
		/* Distinguish a read error from a clean end-of-stream. */
		if (stream->cnt_ < 0) {
			stream->flags_ |= JAS_STREAM_ERR;
		} else {
			stream->flags_ |= JAS_STREAM_EOF;
		}
		stream->cnt_ = 0;
		return EOF;
	}

	assert(stream->cnt_ > 0);
	/* Get or peek at the first character in the buffer. */
	c = (getflag) ? jas_stream_getc2(stream) : (*stream->ptr_);

	return c;
}
/* Write the buffered output to the underlying object and, unless c is
 * EOF, then write the character c through the freshly emptied buffer.
 * Returns 0 (or the written character) on success, EOF on error. */
int jas_stream_flushbuf(jas_stream_t *stream, int c)
{
	int len;
	int n;

	/* The stream should not be in an error or EOF state. */
	if ((stream->flags_ & (JAS_STREAM_ERRMASK)) != 0) {
		return EOF;
	}

	/* The stream must be open for writing. */
	if ((stream->openmode_ & (JAS_STREAM_WRITE | JAS_STREAM_APPEND)) == 0) {
		return EOF;
	}

	/* The buffer should not currently be in use for reading. */
	assert(!(stream->bufmode_ & JAS_STREAM_RDBUF));

	/* Note: Do not use the quantity stream->cnt to determine the number
	of characters in the buffer!  Depending on how this function was
	called, the stream->cnt value may be "off-by-one". */
	len = stream->ptr_ - stream->bufstart_;
	if (len > 0) {
		n = (*stream->ops_->write_)(stream->obj_, (char *)
		  stream->bufstart_, len);
		if (n != len) {
			/* Short write: mark the stream as broken. */
			stream->flags_ |= JAS_STREAM_ERR;
			return EOF;
		}
	}
	/* Reset the buffer to the empty, writable state. */
	stream->cnt_ = stream->bufsize_;
	stream->ptr_ = stream->bufstart_;

	stream->bufmode_ |= JAS_STREAM_WRBUF;

	if (c != EOF) {
		assert(stream->cnt_ > 0);
		return jas_stream_putc2(stream, c);
	}

	return 0;
}
/******************************************************************************\
* Miscellaneous code.
\******************************************************************************/
/* Translate an fopen-style mode string (e.g. "rb", "w+") into the
   corresponding JAS_STREAM_* open-mode flag mask.  Unknown characters
   are silently ignored. */
static int jas_strtoopenmode(const char *s)
{
	int mode = 0;
	for (; *s != '\0'; ++s) {
		switch (*s) {
		case 'r':
			mode |= JAS_STREAM_READ;
			break;
		case 'w':
			mode |= JAS_STREAM_WRITE | JAS_STREAM_CREATE;
			break;
		case 'b':
			mode |= JAS_STREAM_BINARY;
			break;
		case 'a':
			mode |= JAS_STREAM_APPEND;
			break;
		case '+':
			mode |= JAS_STREAM_READ | JAS_STREAM_WRITE;
			break;
		default:
			/* ignore unrecognized mode characters */
			break;
		}
	}
	return mode;
}
/* FIXME integral type */
/* Copy n characters from stream "in" to stream "out".  If n is
   negative, copy until end-of-file.  Returns 0 on success and -1 on
   failure (a short explicit count, a read error, or a write error). */
int jas_stream_copy(jas_stream_t *out, jas_stream_t *in, int n)
{
	int copy_all = (n < 0);
	int remaining = n;
	int ch;

	while (copy_all || remaining > 0) {
		ch = jas_stream_getc_macro(in);
		if (ch == EOF) {
			/* EOF before an explicit count was satisfied, or a
			   genuine read error, is a failure; plain EOF during
			   a copy-everything request is success. */
			return (!copy_all || jas_stream_error(in)) ? (-1) : 0;
		}
		if (jas_stream_putc_macro(out, ch) == EOF) {
			return -1;
		}
		--remaining;
	}
	return 0;
}
/* FIXME integral type */
/* Set the read/write byte counter of the stream to rwcnt and return the
   previous value. */
long jas_stream_setrwcount(jas_stream_t *stream, long rwcnt)
{
	/* Bug fix: the saved value was previously held in an `int`, which
	   truncates rwcnt_ on platforms where long is wider than int
	   (e.g. LP64), corrupting the returned old count. */
	long old;
	old = stream->rwcnt_;
	stream->rwcnt_ = rwcnt;
	return old;
}
/* FIXME integral type */
/* Dump n bytes from the stream to fp as a hex/ASCII listing (16 bytes
   per row).  For large inputs only the final rows are printed.
   Returns 0 on success, -1 if the stream runs out of data early. */
int jas_stream_display(jas_stream_t *stream, FILE *fp, int n)
{
	unsigned char buf[16];
	int i;
	int j;
	int m;
	int c;
	int display;
	int cnt;

	/* Index of the first row that is always displayed. */
	cnt = n - (n % 16);
	display = 1;

	for (i = 0; i < n; i += 16) {
		if (n > 16 && i > 0) {
			display = (i >= cnt) ? 1 : 0;
		}
		if (display) {
			fprintf(fp, "%08x:", i);
		}
		m = JAS_MIN(n - i, 16);
		for (j = 0; j < m; ++j) {
			if ((c = jas_stream_getc(stream)) == EOF) {
				/* Bug fix: this used to call abort() here,
				   terminating the whole process on a short
				   stream and making the error return
				   unreachable.  Report failure instead. */
				return -1;
			}
			buf[j] = c;
		}
		if (display) {
			/* hex column */
			for (j = 0; j < m; ++j) {
				fprintf(fp, " %02x", buf[j]);
			}
			fputc(' ', fp);
			/* pad a short final row so the ASCII column lines up */
			for (; j < 16; ++j) {
				fprintf(fp, "   ");
			}
			/* ASCII column; non-printable bytes become spaces */
			for (j = 0; j < m; ++j) {
				if (isprint(buf[j])) {
					fputc(buf[j], fp);
				} else {
					fputc(' ', fp);
				}
			}
			fprintf(fp, "\n");
		}
	}
	return 0;
}
/* Determine the length of the stream by seeking to its end, then
   restore the original position.  Returns the length, or -1 if the
   stream does not support seeking/telling. */
long jas_stream_length(jas_stream_t *stream)
{
	long origpos;
	long endpos;

	/* Same sequence as before, expressed as one short-circuit chain:
	   remember where we are, hop to the end, read the offset there,
	   and hop back. */
	if ((origpos = jas_stream_tell(stream)) < 0 ||
	  jas_stream_seek(stream, 0, SEEK_END) < 0 ||
	  (endpos = jas_stream_tell(stream)) < 0 ||
	  jas_stream_seek(stream, origpos, SEEK_SET) < 0) {
		return -1;
	}
	return endpos;
}
/******************************************************************************\
* Memory stream object.
\******************************************************************************/
/* FIXME integral type */
/* Read up to cnt bytes from a memory stream into buf.  Returns the
   number of bytes actually read (0 at/after end of data). */
static int mem_read(jas_stream_obj_t *obj, char *buf, int cnt)
{
	ssize_t n;
	assert(cnt >= 0);
	assert(buf);
	JAS_DBGLOG(100, ("mem_read(%p, %p, %d)\n", obj, buf, cnt));
	jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj;
	/* Bug fix: if the position has been seeked past the end of the
	   data, the unsigned subtraction len_ - pos_ wraps to a huge
	   value (negative once stored in n), which JAS_MIN then selects,
	   driving memcpy with a wrapped size.  Treat that case as EOF. */
	if (m->pos_ >= m->len_) {
		return 0;
	}
	n = m->len_ - m->pos_;
	cnt = JAS_MIN(n, cnt);
	memcpy(buf, &m->buf_[m->pos_], cnt);
	m->pos_ += cnt;
	return cnt;
}
/* Grow or shrink the backing buffer of a memory stream to bufsize
   bytes.  Returns 0 on success, -1 if the reallocation fails.  On
   failure the original buffer is left intact. */
static int mem_resize(jas_stream_memobj_t *m, size_t bufsize)
{
	unsigned char *newbuf;

	JAS_DBGLOG(100, ("mem_resize(%p, %zu)\n", m, bufsize));
	if (!bufsize) {
		jas_eprintf(
		  "mem_resize was not really designed to handle a buffer of size 0\n"
		  "This may not work.\n"
		);
	}

	/* jas_realloc2 may legitimately return NULL for a zero-size
	   request, so only treat NULL as failure when bufsize != 0. */
	newbuf = jas_realloc2(m->buf_, bufsize, sizeof(unsigned char));
	if (!newbuf && bufsize) {
		JAS_DBGLOG(100, ("mem_resize realloc failed\n"));
		return -1;
	}
	JAS_DBGLOG(100, ("mem_resize realloc succeeded\n"));
	m->buf_ = newbuf;
	m->bufsize_ = bufsize;
	return 0;
}
/* FIXME integral type */
/* Write cnt bytes from buf into a memory stream at the current
   position, growing the backing buffer (by repeated doubling) if the
   stream is growable.  Returns the number of bytes written, or -1 if
   growing the buffer would overflow size_t or the resize fails.
   NOTE(review): a short write (buffer full, non-growable stream) trips
   the assert(ret == cnt) at the bottom in debug builds. */
static int mem_write(jas_stream_obj_t *obj, char *buf, int cnt)
{
	size_t n;
	int ret;
	jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj;
	size_t newbufsize;
	size_t newpos;
	assert(buf);
	assert(cnt >= 0);
	JAS_DBGLOG(100, ("mem_write(%p, %p, %d)\n", obj, buf, cnt));
	/* Position the write would end at. */
	newpos = m->pos_ + cnt;
	if (newpos > m->bufsize_ && m->growable_) {
		/* Double the buffer size until it covers newpos, using the
		   overflow-checked multiply instead of a raw shift. */
		newbufsize = m->bufsize_;
		while (newbufsize < newpos) {
			//newbufsize <<= 1;
			if (!jas_safe_size_mul(newbufsize, 2, &newbufsize)) {
				JAS_DBGLOG(100, ("new buffer size would cause overflow\n"));
				return -1;
			}
		}
		JAS_DBGLOG(100, ("mem_write resizing from %d to %zu\n", m->bufsize_,
		  newbufsize));
		assert(newbufsize > 0);
		if (mem_resize(m, newbufsize)) {
			return -1;
		}
	}
	if (m->pos_ > m->len_) {
		/* The current position is beyond the end of the file, so
		pad the file to the current position with zeros. */
		n = JAS_MIN(m->pos_, m->bufsize_) - m->len_;
		if (n > 0) {
			memset(&m->buf_[m->len_], 0, n);
			m->len_ += n;
		}
		if (m->pos_ != m->len_) {
			/* The buffer is not big enough. */
			return 0;
		}
	}
	/* Copy as much as fits between the current position and the end
	   of the buffer. */
	n = m->bufsize_ - m->pos_;
	ret = JAS_MIN(n, cnt);
	if (ret > 0) {
		memcpy(&m->buf_[m->pos_], buf, ret);
		m->pos_ += ret;
	}
	/* Writing past the old end extends the logical data length. */
	if (m->pos_ > m->len_) {
		m->len_ = m->pos_;
	}
	assert(ret == cnt);
	return ret;
}
/* FIXME integral type */
/* Reposition a memory stream.  Returns the new position on success or
   -1 if the resulting position would be negative. */
static long mem_seek(jas_stream_obj_t *obj, long offset, int origin)
{
	jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj;
	/* Bug fix: newpos was declared size_t (unsigned), which made the
	   "newpos < 0" check below dead code — a negative target position
	   silently wrapped to a huge unsigned value instead of failing.
	   A signed type restores the guard. */
	long newpos;

	JAS_DBGLOG(100, ("mem_seek(%p, %ld, %d)\n", obj, offset, origin));
	switch (origin) {
	case SEEK_SET:
		newpos = offset;
		break;
	case SEEK_END:
		newpos = m->len_ - offset;
		break;
	case SEEK_CUR:
		newpos = m->pos_ + offset;
		break;
	default:
		abort();
		break;
	}
	if (newpos < 0) {
		return -1;
	}
	m->pos_ = newpos;
	return m->pos_;
}
/* Release a memory stream object, freeing the data buffer only when
   this stream allocated it itself (myalloc_).  Always returns 0. */
static int mem_close(jas_stream_obj_t *obj)
{
	jas_stream_memobj_t *m = (jas_stream_memobj_t *)obj;

	JAS_DBGLOG(100, ("mem_close(%p)\n", obj));
	JAS_DBGLOG(100, ("mem_close myalloc=%d\n", m->myalloc_));

	/* Caller-supplied buffers are the caller's responsibility. */
	if (m->myalloc_ && m->buf_) {
		JAS_DBGLOG(100, ("mem_close freeing buffer %p\n", m->buf_));
		jas_free(m->buf_);
		m->buf_ = 0;
	}
	jas_free(obj);
	return 0;
}
/******************************************************************************\
* File stream object.
\******************************************************************************/
/* FIXME integral type */
/* Thin wrapper forwarding a stream read to the POSIX read(2) on the
   underlying file descriptor. */
static int file_read(jas_stream_obj_t *obj, char *buf, int cnt)
{
	JAS_DBGLOG(100, ("file_read(%p, %p, %d)\n", obj, buf, cnt));
	jas_stream_fileobj_t *fileobj = JAS_CAST(jas_stream_fileobj_t *, obj);
	return read(fileobj->fd, buf, cnt);
}
/* FIXME integral type */
/* Thin wrapper forwarding a stream write to the POSIX write(2) on the
   underlying file descriptor. */
static int file_write(jas_stream_obj_t *obj, char *buf, int cnt)
{
	JAS_DBGLOG(100, ("file_write(%p, %p, %d)\n", obj, buf, cnt));
	jas_stream_fileobj_t *fileobj = JAS_CAST(jas_stream_fileobj_t *, obj);
	return write(fileobj->fd, buf, cnt);
}
/* FIXME integral type */
/* Thin wrapper forwarding a stream seek to lseek(2) on the underlying
   file descriptor. */
static long file_seek(jas_stream_obj_t *obj, long offset, int origin)
{
	JAS_DBGLOG(100, ("file_seek(%p, %ld, %d)\n", obj, offset, origin));
	jas_stream_fileobj_t *fileobj = JAS_CAST(jas_stream_fileobj_t *, obj);
	return lseek(fileobj->fd, offset, origin);
}
/* Close the underlying file descriptor, unlink the file if it was
   opened delete-on-close (temporary file), and free the stream object.
   Returns the close(2) result. */
static int file_close(jas_stream_obj_t *obj)
{
	JAS_DBGLOG(100, ("file_close(%p)\n", obj));
	jas_stream_fileobj_t *fileobj = JAS_CAST(jas_stream_fileobj_t *, obj);
	int rc = close(fileobj->fd);
	if (fileobj->flags & JAS_STREAM_FILEOBJ_DELONCLOSE) {
		unlink(fileobj->pathname);
	}
	jas_free(fileobj);
	return rc;
}
/******************************************************************************\
* Stdio file stream object.
\******************************************************************************/
/* FIXME integral type */
/* Read up to cnt bytes from the underlying stdio stream.  Returns the
   number of bytes read (possibly 0 at EOF) or -1 on a genuine read
   error. */
static int sfile_read(jas_stream_obj_t *obj, char *buf, int cnt)
{
	FILE *fp;
	size_t n;
	JAS_DBGLOG(100, ("sfile_read(%p, %p, %d)\n", obj, buf, cnt));
	fp = JAS_CAST(FILE *, obj);
	n = fread(buf, 1, cnt, fp);
	/* Bug fix: the original computed an error result on a short read
	   and then unconditionally overwrote it with the byte count (dead
	   store), so read errors were silently reported as short reads.
	   Report -1 when ferror() indicates a real error; a short count
	   at plain EOF is returned as-is. */
	if (n != JAS_CAST(size_t, cnt) && ferror(fp)) {
		return -1;
	}
	return JAS_CAST(int, n);
}
/* FIXME integral type */
/* Write cnt bytes to the underlying stdio stream.  Returns cnt on a
   full write, -1 on a short write. */
static int sfile_write(jas_stream_obj_t *obj, char *buf, int cnt)
{
	JAS_DBGLOG(100, ("sfile_write(%p, %p, %d)\n", obj, buf, cnt));
	FILE *fp = JAS_CAST(FILE *, obj);
	size_t nwritten = fwrite(buf, 1, cnt, fp);
	return (nwritten == JAS_CAST(size_t, cnt)) ? cnt : (-1);
}
/* FIXME integral type */
/* Thin wrapper forwarding a stream seek to fseek() on the underlying
   stdio stream. */
static long sfile_seek(jas_stream_obj_t *obj, long offset, int origin)
{
	JAS_DBGLOG(100, ("sfile_seek(%p, %ld, %d)\n", obj, offset, origin));
	FILE *fp = JAS_CAST(FILE *, obj);
	return fseek(fp, offset, origin);
}
/* Close the underlying stdio stream; fclose() also flushes pending
   output, and its result is propagated to the caller. */
static int sfile_close(jas_stream_obj_t *obj)
{
	JAS_DBGLOG(100, ("sfile_close(%p)\n", obj));
	return fclose(JAS_CAST(FILE *, obj));
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_3223_0 |
crossvul-cpp_data_good_3060_8 | /* Key type used to cache DNS lookups made by the kernel
*
* See Documentation/networking/dns_resolver.txt
*
* Copyright (c) 2007 Igor Mammedov
* Author(s): Igor Mammedov (niallain@gmail.com)
* Steve French (sfrench@us.ibm.com)
* Wang Lei (wang840925@gmail.com)
* David Howells (dhowells@redhat.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/keyctl.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <keys/dns_resolver-type.h>
#include <keys/user-type.h>
#include "internal.h"
MODULE_DESCRIPTION("DNS Resolver");
MODULE_AUTHOR("Wang Lei");
MODULE_LICENSE("GPL");

/* Debugging mask for kenter/kleave/kdebug; writable by root via the
 * "debug" module parameter. */
unsigned int dns_resolver_debug;
module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");

/* Override credentials (with a dedicated thread keyring) under which
 * kernel DNS lookups are cached; set up in init_dns_resolver(). */
const struct cred *dns_resolver_cache;

/* Option name whose value records a DNS error number as the key's
 * result instead of address data. */
#define	DNS_ERRORNO_OPTION	"dnserror"
/*
* Preparse instantiation data for a dns_resolver key.
*
* The data must be a NUL-terminated string, with the NUL char accounted in
* datalen.
*
* If the data contains a '#' characters, then we take the clause after each
* one to be an option of the form 'key=value'. The actual data of interest is
* the string leading up to the first '#'. For instance:
*
* "ip1,ip2,...#foo=bar"
*/
static int
dns_resolver_preparse(struct key_preparsed_payload *prep)
{
	struct user_key_payload *upayload;
	unsigned long derrno;
	int ret;
	int datalen = prep->datalen, result_len = 0;
	const char *data = prep->data, *end, *opt;

	kenter("'%*.*s',%u", datalen, datalen, data, datalen);

	/* The payload must be a non-empty NUL-terminated string with the
	 * NUL included in datalen (see the header comment above). */
	if (datalen <= 1 || !data || data[datalen - 1] != '\0')
		return -EINVAL;
	datalen--;

	/* deal with any options embedded in the data */
	end = data + datalen;
	opt = memchr(data, '#', datalen);
	if (!opt) {
		/* no options: the entire data is the result */
		kdebug("no options");
		result_len = datalen;
	} else {
		const char *next_opt;

		result_len = opt - data;
		opt++;
		kdebug("options: '%s'", opt);
		do {
			const char *eq;
			int opt_len, opt_nlen, opt_vlen, tmp;

			/* Each option runs to the next '#' or the end of the
			 * payload; memchr (not strchr) keeps all scanning
			 * within the buffer bounds. */
			next_opt = memchr(opt, '#', end - opt) ?: end;
			opt_len = next_opt - opt;
			if (!opt_len) {
				printk(KERN_WARNING
				       "Empty option to dns_resolver key\n");
				return -EINVAL;
			}

			eq = memchr(opt, '=', opt_len) ?: end;
			opt_nlen = eq - opt;
			eq++;
			opt_vlen = next_opt - eq; /* will be -1 if no value */

			tmp = opt_vlen >= 0 ? opt_vlen : 0;
			kdebug("option '%*.*s' val '%*.*s'",
			       opt_nlen, opt_nlen, opt, tmp, tmp, eq);

			/* see if it's an error number representing a DNS error
			 * that's to be recorded as the result in this key */
			if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
			    memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
				kdebug("dns error number option");
				/* opt_vlen <= 0 also covers the no-'=' case,
				 * so eq is never dereferenced when it points
				 * past the option. */
				if (opt_vlen <= 0)
					goto bad_option_value;

				ret = kstrtoul(eq, 10, &derrno);
				if (ret < 0)
					goto bad_option_value;

				/* Only accept plausible error numbers. */
				if (derrno < 1 || derrno > 511)
					goto bad_option_value;

				kdebug("dns error no. = %lu", derrno);
				/* Stash the error as an ERR_PTR so that
				 * instantiation records it as the result. */
				prep->type_data[0] = ERR_PTR(-derrno);
				continue;
			}

		bad_option_value:
			printk(KERN_WARNING
			       "Option '%*.*s' to dns_resolver key:"
			       " bad/missing value\n",
			       opt_nlen, opt_nlen, opt);
			return -EINVAL;
		} while (opt = next_opt + 1, opt < end);
	}

	/* don't cache the result if we're caching an error saying there's no
	 * result */
	if (prep->type_data[0]) {
		kleave(" = 0 [h_error %ld]", PTR_ERR(prep->type_data[0]));
		return 0;
	}

	kdebug("store result");
	prep->quotalen = result_len;
	/* +1 for a NUL terminator appended after the copied data. */
	upayload = kmalloc(sizeof(*upayload) + result_len + 1, GFP_KERNEL);
	if (!upayload) {
		kleave(" = -ENOMEM");
		return -ENOMEM;
	}

	upayload->datalen = result_len;
	memcpy(upayload->data, data, result_len);
	upayload->data[result_len] = '\0';
	prep->payload[0] = upayload;

	kleave(" = 0");
	return 0;
}
/*
* Clean up the preparse data
*/
/* Free the payload allocated by dns_resolver_preparse(); payload[0] may
 * be NULL (error-result keys), which kfree() handles as a no-op. */
static void dns_resolver_free_preparse(struct key_preparsed_payload *prep)
{
	pr_devel("==>%s()\n", __func__);
	kfree(prep->payload[0]);
}
/*
* The description is of the form "[<type>:]<domain_name>"
*
* The domain name may be a simple name or an absolute domain name (which
* should end with a period). The domain name is case-independent.
*/
/* Case-insensitively compare a key's description against the requested
 * domain name, treating "name" and "name." (absolute form) as equal.
 * Returns 1 on a match, 0 otherwise. */
static int dns_resolver_cmp(const struct key *key,
			    const struct key_match_data *match_data)
{
	const char *a = key->description;
	const char *b = match_data->raw_data;
	int alen, blen, matched = 0;

	kenter("%s,%s", a, b);

	if (a && b) {
		if (strcasecmp(a, b) == 0) {
			matched = 1;
		} else {
			alen = strlen(a);
			blen = strlen(b);
			if (alen > 0 && blen > 0) {
				/* Ignore a single trailing dot on either
				 * side before comparing. */
				if (a[alen - 1] == '.')
					alen--;
				if (b[blen - 1] == '.')
					blen--;
				if (alen == blen &&
				    strncasecmp(a, b, alen) == 0)
					matched = 1;
			}
		}
	}

	kleave(" = %d", matched);
	return matched;
}
/*
* Preparse the match criterion.
*/
static int dns_resolver_match_preparse(struct key_match_data *match_data)
{
	/* Direct description lookup cannot handle the trailing-dot
	 * equivalence, so force an iterative search using our custom
	 * comparator. */
	match_data->lookup_type = KEYRING_SEARCH_LOOKUP_ITERATE;
	match_data->cmp = dns_resolver_cmp;
	return 0;
}
/*
* Describe a DNS key
*/
/* Describe a DNS key for /proc/keys: the description, then either the
 * cached negative error number or the payload length. */
static void dns_resolver_describe(const struct key *key, struct seq_file *m)
{
	seq_puts(m, key->description);
	if (!key_is_instantiated(key))
		return;
	if (key->type_data.x[0])
		seq_printf(m, ": %d", key->type_data.x[0]);
	else
		seq_printf(m, ": %u", key->datalen);
}
/*
* read the DNS data
* - the key's semaphore is read-locked
*/
/* Read a DNS key's data.  A key that caches a lookup error has no data
 * to return, so the stored (negative) error code is reported instead;
 * otherwise the read is delegated to the generic user-key reader.
 * Called with the key semaphore read-locked. */
static long dns_resolver_read(const struct key *key,
			      char __user *buffer, size_t buflen)
{
	long err = key->type_data.x[0];

	if (err)
		return err;
	return user_read(key, buffer, buflen);
}
/* Key type descriptor for cached kernel DNS lookups.  Payload handling
 * is largely delegated to the generic user-key helpers; only parsing,
 * matching, describing, and reading are specialized above. */
struct key_type key_type_dns_resolver = {
	.name		= "dns_resolver",
	.preparse	= dns_resolver_preparse,
	.free_preparse	= dns_resolver_free_preparse,
	.instantiate	= generic_key_instantiate,
	.match_preparse	= dns_resolver_match_preparse,
	.revoke		= user_revoke,
	.destroy	= user_destroy,
	.describe	= dns_resolver_describe,
	.read		= dns_resolver_read,
};
/* Module init: build the credential set and keyring used to cache DNS
 * lookups, then register the key type.  On failure, resources are
 * released in reverse order via the goto-cleanup chain. */
static int __init init_dns_resolver(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret;

	/* create an override credential set with a special thread keyring in
	 * which DNS requests are cached
	 *
	 * this is used to prevent malicious redirections from being installed
	 * with add_key().
	 */
	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;

	/* Keyring is root-owned; userspace may view/read it but not
	 * modify its attributes, and it is exempt from quota. */
	keyring = keyring_alloc(".dns_resolver",
				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
				KEY_USR_VIEW | KEY_USR_READ,
				KEY_ALLOC_NOT_IN_QUOTA, NULL);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}

	ret = register_key_type(&key_type_dns_resolver);
	if (ret < 0)
		goto failed_put_key;

	/* instruct request_key() to use this special keyring as a cache for
	 * the results it looks up */
	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	dns_resolver_cache = cred;

	kdebug("DNS resolver keyring: %d\n", key_serial(keyring));
	return 0;

failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}
/* Module exit: revoke the cache keyring before unregistering the key
 * type, then drop the credential reference (which releases the keyring
 * itself).  Order matters here. */
static void __exit exit_dns_resolver(void)
{
	key_revoke(dns_resolver_cache->thread_keyring);
	unregister_key_type(&key_type_dns_resolver);
	put_cred(dns_resolver_cache);
}
module_init(init_dns_resolver)
module_exit(exit_dns_resolver)
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_3060_8 |
crossvul-cpp_data_good_1621_0 | /*
* PgBouncer - Lightweight connection pooler for PostgreSQL.
*
* Copyright (c) 2007-2009 Marko Kreen, Skype Technologies OÜ
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
* Client connection handling
*/
#include "bouncer.h"
#include <usual/pgutil.h>
/* Render the unread portion of an MBuf as a hex string into buf (for
 * logging bad packet headers).  Returns buf via bin2hex. */
static const char *hdr2hex(const struct MBuf *data, char *buf, unsigned buflen)
{
	unsigned int avail = mbuf_avail_for_read(data);
	const uint8_t *raw = data->data + data->read_pos;
	return bin2hex(raw, avail, buf, buflen);
}
/* Verify the password supplied by a client against the stored
 * credentials for client->auth_user, according to cf_auth_type.
 * Returns true only on a successful match. */
static bool check_client_passwd(PgSocket *client, const char *passwd)
{
	char md5[MD5_PASSWD_LEN + 1];
	const char *correct;
	PgUser *user = client->auth_user;

	/* auth_user may be missing */
	/* (guards against a crash if a password packet arrives before
	 * the startup packet established a user) */
	if (!user) {
		slog_error(client, "Password packet before auth packet?");
		return false;
	}

	/* disallow empty passwords */
	if (!*passwd || !*user->passwd)
		return false;

	switch (cf_auth_type) {
	case AUTH_PLAIN:
		return strcmp(user->passwd, passwd) == 0;
	case AUTH_CRYPT:
		/* crypt() may return NULL on failure; the && guards the
		 * strcmp against that. */
		correct = crypt(user->passwd, (char *)client->tmp_login_salt);
		return correct && strcmp(correct, passwd) == 0;
	case AUTH_MD5:
		if (strlen(passwd) != MD5_PASSWD_LEN)
			return false;
		/* Convert a plaintext stored password to md5 form in place
		 * so the salted comparison below works either way. */
		if (!isMD5(user->passwd))
			pg_md5_encrypt(user->passwd, user->name, strlen(user->name), user->passwd);
		/* +3 skips the "md5" prefix of the stored hash. */
		pg_md5_encrypt(user->passwd + 3, (char *)client->tmp_login_salt, 4, md5);
		return strcmp(md5, passwd) == 0;
	}
	/* unknown/unhandled auth type */
	return false;
}
/* mask to get offset into valid_crypt_salt[] */
/* (64 entries, so 6 random bits select one salt character) */
#define SALT_MASK 0x3F

/* The character set crypt(3) accepts for its two-character salt. */
static const char valid_crypt_salt[] =
"./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
/* Send an Authentication request ('R') packet to the client, preparing
 * a random salt first for crypt/md5 modes.  AUTH_ANY is reported to the
 * client as trust (no password needed).  Returns the send result. */
static bool send_client_authreq(PgSocket *client)
{
	uint8_t saltlen = 0;
	int res;
	int auth = cf_auth_type;
	uint8_t randbuf[2];

	/* Consistency fix: the chain below previously tested the local
	 * copy `auth` in some branches and the global cf_auth_type in
	 * others; they hold the same value here, so behavior is
	 * unchanged, but one name is used throughout now. */
	if (auth == AUTH_CRYPT) {
		/* crypt(3) wants a 2-char salt from a restricted alphabet. */
		saltlen = 2;
		get_random_bytes(randbuf, saltlen);
		client->tmp_login_salt[0] = valid_crypt_salt[randbuf[0] & SALT_MASK];
		client->tmp_login_salt[1] = valid_crypt_salt[randbuf[1] & SALT_MASK];
		client->tmp_login_salt[2] = 0;
	} else if (auth == AUTH_MD5) {
		/* md5 auth uses 4 raw random salt bytes. */
		saltlen = 4;
		get_random_bytes((void*)client->tmp_login_salt, saltlen);
	} else if (auth == AUTH_ANY) {
		auth = AUTH_TRUST;
	}

	SEND_generic(res, client, 'R', "ib", auth, client->tmp_login_salt, saltlen);
	return res;
}
/* Kick off the auth_query against the database's auth_user connection
 * to fetch credentials for `username`.  If no server connection is
 * available yet, mark the client as waiting and return; the request is
 * retried when a connection appears. */
static void start_auth_request(PgSocket *client, const char *username)
{
	int res;
	PktBuf *buf;

	client->auth_user = client->db->auth_user;
	/* have to fetch user info from db */
	client->pool = get_pool(client->db, client->db->auth_user);
	if (!find_server(client)) {
		client->wait_for_user_conn = true;
		return;
	}
	slog_noise(client, "Doing auth_conn query");
	client->wait_for_user_conn = false;
	client->wait_for_user = true;
	/* Pause client traffic while the auth query runs on the link. */
	if (!sbuf_pause(&client->sbuf)) {
		release_server(client->link);
		disconnect_client(client, true, "pause failed");
		return;
	}
	client->link->ready = 0;

	res = 0;
	buf = pktbuf_dynamic(512);
	if (buf) {
		/* Run cf_auth_query as an extended-protocol query with the
		 * username as its single parameter. */
		pktbuf_write_ExtQuery(buf, cf_auth_query, 1, username);
		res = pktbuf_send_immediate(buf, client->link);
		pktbuf_free(buf);
		/*
		 * Should do instead:
		 * res = pktbuf_send_queued(buf, client->link);
		 * but that needs better integration with SBuf.
		 */
	}
	/* res is also 0 when pktbuf_dynamic() failed above. */
	if (!res)
		disconnect_server(client->link, false, "unable to send login query");
}
/* Final stage of pool assignment: bind the client to its pool and
 * either complete the login (trust/takeover/admin) or request a
 * password from the client.  Returns false if the client was
 * disconnected. */
static bool finish_set_pool(PgSocket *client, bool takeover)
{
	PgUser *user = client->auth_user;

	/* pool user may be forced */
	if (client->db->forced_user) {
		user = client->db->forced_user;
	}
	client->pool = get_pool(client->db, user);
	if (!client->pool) {
		disconnect_client(client, true, "no memory for pool");
		return false;
	}

	if (cf_log_connections)
		slog_info(client, "login attempt: db=%s user=%s", client->db->name, client->auth_user->name);

	if (!check_fast_fail(client))
		return false;

	/* Takeover connections skip the normal login handshake. */
	if (takeover)
		return true;

	if (client->pool->db->admin) {
		if (!admin_post_login(client))
			return false;
	}

	/* own_user: client connected over the unix socket as the same OS
	 * user pgbouncer runs as — no password needed. */
	if (cf_auth_type <= AUTH_TRUST || client->own_user) {
		if (!finish_client_login(client))
			return false;
	} else {
		if (!send_client_authreq(client)) {
			disconnect_client(client, false, "failed to send auth req");
			return false;
		}
	}
	return true;
}
/* Resolve the (database, user) pair for a connecting client and attach
 * it to the matching pool, auto-registering the database if needed.
 * May defer by starting an auth_query when the user is unknown but the
 * database has an auth_user.  Returns false if the client was
 * disconnected or the decision is deferred. */
bool set_pool(PgSocket *client, const char *dbname, const char *username, const char *password, bool takeover)
{
	/* find database */
	client->db = find_database(dbname);
	if (!client->db) {
		/* not configured statically; try wildcard/auto database */
		client->db = register_auto_database(dbname);
		if (!client->db) {
			disconnect_client(client, true, "No such database: %s", dbname);
			if (cf_log_connections)
				slog_info(client, "login failed: db=%s user=%s", dbname, username);
			return false;
		}
		else {
			slog_info(client, "registered new auto-database: db = %s", dbname );
		}
	}

	/* are new connections allowed? */
	if (client->db->db_disabled) {
		disconnect_client(client, true, "database does not allow connections: %s", dbname);
		return false;
	}

	if (client->db->admin) {
		if (admin_pre_login(client, username))
			return finish_set_pool(client, takeover);
	}

	/* find user */
	if (cf_auth_type == AUTH_ANY) {
		/* ignore requested user */
		if (client->db->forced_user == NULL) {
			slog_error(client, "auth_type=any requires forced user");
			disconnect_client(client, true, "bouncer config error");
			return false;
		}
		client->auth_user = client->db->forced_user;
	} else {
		/* the user clients wants to log in as */
		client->auth_user = find_user(username);
		if (!client->auth_user && client->db->auth_user) {
			/* takeover carries the password along, so the user
			 * can be registered directly; otherwise fetch the
			 * credentials via auth_query and finish later. */
			if (takeover) {
				client->auth_user = add_db_user(client->db, username, password);
				return finish_set_pool(client, takeover);
			}
			start_auth_request(client, username);
			return false;
		}
		if (!client->auth_user) {
			disconnect_client(client, true, "No such user: %s", username);
			if (cf_log_connections)
				slog_info(client, "login failed: db=%s user=%s", dbname, username);
			return false;
		}
	}
	return finish_set_pool(client, takeover);
}
/* Process one server packet received while the auth_query for a client
 * login is running.  On ReadyForQuery the client login resumes (or is
 * rejected if no user row was seen).  Returns false when the server
 * connection was dropped and must not be touched further. */
bool handle_auth_response(PgSocket *client, PktHdr *pkt) {
	uint16_t columns;
	uint32_t length;
	const char *username, *password;
	PgUser user;
	PgSocket *server = client->link;

	switch(pkt->type) {
	case 'T':	/* RowDescription */
		if (!mbuf_get_uint16be(&pkt->data, &columns)) {
			disconnect_server(server, false, "bad packet");
			return false;
		}
		/* auth_query must yield exactly (username, password).
		 * Bug fix: the error message said "expected 1 column" while
		 * the check requires 2. */
		if (columns != 2u) {
			disconnect_server(server, false, "expected 2 columns from login query, not %hu", columns);
			return false;
		}
		break;
	case 'D':	/* DataRow */
		memset(&user, 0, sizeof(user));
		if (!mbuf_get_uint16be(&pkt->data, &columns)) {
			disconnect_server(server, false, "bad packet");
			return false;
		}
		if (columns != 2u) {
			disconnect_server(server, false, "expected 2 columns from login query, not %hu", columns);
			return false;
		}
		if (!mbuf_get_uint32be(&pkt->data, &length)) {
			disconnect_server(server, false, "bad packet");
			return false;
		}
		if (!mbuf_get_chars(&pkt->data, length, &username)) {
			disconnect_server(server, false, "bad packet");
			return false;
		}
		/* truncate to fit the fixed-size PgUser field */
		if (sizeof(user.name) - 1 < length)
			length = sizeof(user.name) - 1;
		memcpy(user.name, username, length);
		if (!mbuf_get_uint32be(&pkt->data, &length)) {
			disconnect_server(server, false, "bad packet");
			return false;
		}
		if (length == (uint32_t)-1) {
			/*
			 * NULL - set an md5 password with an impossible value,
			 * so that nothing will ever match
			 */
			password = "md5";
			length = 3;
		} else {
			if (!mbuf_get_chars(&pkt->data, length, &password)) {
				disconnect_server(server, false, "bad packet");
				return false;
			}
		}
		if (sizeof(user.passwd) - 1 < length)
			length = sizeof(user.passwd) - 1;
		memcpy(user.passwd, password, length);

		client->auth_user = add_db_user(client->db, user.name, user.passwd);
		if (!client->auth_user) {
			disconnect_server(server, false, "unable to allocate new user for auth");
			return false;
		}
		break;
	case 'C':	/* CommandComplete */
		break;
	case '1':	/* ParseComplete */
		break;
	case '2':	/* BindComplete */
		break;
	case 'Z':	/* ReadyForQuery */
		sbuf_prepare_skip(&client->link->sbuf, pkt->len);
		if (!client->auth_user) {
			/* query returned no row: unknown user */
			if (cf_log_connections)
				slog_info(client, "login failed: db=%s", client->db->name);
			disconnect_client(client, true, "No such user");
		} else {
			slog_noise(client, "auth query complete");
			client->link->resetting = true;
			sbuf_continue(&client->sbuf);
		}
		/*
		 * either sbuf_continue or disconnect_client could disconnect the server
		 * way down in their bowels of other callbacks. so check that, and
		 * return appropriately (similar to reuse_on_release)
		 */
		if (server->state == SV_FREE || server->state == SV_JUSTFREE)
			return false;
		return true;
	default:
		disconnect_server(server, false, "unexpected response from login query");
		return false;
	}
	sbuf_prepare_skip(&server->sbuf, pkt->len);
	return true;
}
/* Store the client's application_name in its variable cache, optionally
 * (cf_application_name_add_host) extending it with the client's address
 * details.  A NULL app_name is only stored when host details are
 * requested (as "app - <details>"). */
static void set_appname(PgSocket *client, const char *app_name)
{
	char namebuf[400];
	char addrbuf[300];

	if (cf_application_name_add_host) {
		const char *peer = pga_details(&client->remote_addr, addrbuf, sizeof(addrbuf));
		snprintf(namebuf, sizeof(namebuf), "%s - %s",
			 app_name ? app_name : "app", peer);
		app_name = namebuf;
	}

	if (app_name) {
		slog_debug(client, "using application_name: %s", app_name);
		varcache_set(&client->vars, "application_name", app_name);
	}
}
/* Parse the key/value parameter list of a startup packet, extract
 * database/user/application_name, cache or reject other parameters,
 * enforce max_client_conn, and hand off to set_pool().  Returns false
 * if the client was disconnected or the login is deferred. */
static bool decide_startup_pool(PgSocket *client, PktHdr *pkt)
{
	const char *username = NULL, *dbname = NULL;
	const char *key, *val;
	bool ok;
	bool appname_found = false;

	/* iterate over the NUL-terminated key/value pairs; an empty key
	 * marks the end of the list */
	while (1) {
		ok = mbuf_get_string(&pkt->data, &key);
		if (!ok || *key == 0)
			break;
		ok = mbuf_get_string(&pkt->data, &val);
		if (!ok)
			break;

		if (strcmp(key, "database") == 0) {
			slog_debug(client, "got var: %s=%s", key, val);
			dbname = val;
		} else if (strcmp(key, "user") == 0) {
			slog_debug(client, "got var: %s=%s", key, val);
			username = val;
		} else if (strcmp(key, "application_name") == 0) {
			set_appname(client, val);
			appname_found = true;
		} else if (varcache_set(&client->vars, key, val)) {
			/* one of the tracked/passthrough parameters */
			slog_debug(client, "got var: %s=%s", key, val);
		} else if (strlist_contains(cf_ignore_startup_params, key)) {
			slog_debug(client, "ignoring startup parameter: %s=%s", key, val);
		} else {
			slog_warning(client, "unsupported startup parameter: %s=%s", key, val);
			disconnect_client(client, true, "Unsupported startup parameter: %s", key);
			return false;
		}
	}
	if (!username || !username[0]) {
		disconnect_client(client, true, "No username supplied");
		return false;
	}

	/* if missing dbname, default to username */
	if (!dbname || !dbname[0])
		dbname = username;

	/* create application_name if requested */
	if (!appname_found)
		set_appname(client, NULL);

	/* check if limit allows, don't limit admin db
	   nb: new incoming conn will be attached to PgSocket, thus
	   get_active_client_count() counts it */
	if (get_active_client_count() > cf_max_client_conn) {
		if (strcmp(dbname, "pgbouncer") != 0) {
			disconnect_client(client, true, "no more connections allowed (max_client_conn)");
			return false;
		}
	}

	/* find pool */
	return set_pool(client, dbname, username, "", false);
}
/* decide on packets of client in login phase */
/* Handle one packet from a client that is still in the login phase:
 * SSL negotiation (rejected), startup packet, password response, or
 * cancel request.  Returns false if the client was disconnected. */
static bool handle_client_startup(PgSocket *client, PktHdr *pkt)
{
	const char *passwd;
	const uint8_t *key;
	bool ok;
	SBuf *sbuf = &client->sbuf;

	/* don't tolerate partial packets */
	if (incomplete_pkt(pkt)) {
		disconnect_client(client, true, "client sent partial pkt in startup phase");
		return false;
	}

	if (client->wait_for_welcome) {
		if (finish_client_login(client)) {
			/* the packet was already parsed */
			sbuf_prepare_skip(sbuf, pkt->len);
			return true;
		} else
			return false;
	}

	switch (pkt->type) {
	case PKT_SSLREQ:
		slog_noise(client, "C: req SSL");
		slog_noise(client, "P: nak");

		/* reject SSL attempt */
		if (!sbuf_answer(&client->sbuf, "N", 1)) {
			disconnect_client(client, false, "failed to nak SSL");
			return false;
		}
		break;
	case PKT_STARTUP_V2:
		disconnect_client(client, true, "Old V2 protocol not supported");
		return false;
	case PKT_STARTUP:
		/* a pool is already set unless we are waiting for the
		 * auth_query round-trip to complete */
		if (client->pool && !client->wait_for_user_conn && !client->wait_for_user) {
			disconnect_client(client, true, "client re-sent startup pkt");
			return false;
		}

		if (client->wait_for_user) {
			/* auth_query finished; packet was parsed earlier */
			client->wait_for_user = false;
			if (!finish_set_pool(client, false))
				return false;
		} else if (!decide_startup_pool(client, pkt)) {
			return false;
		}
		break;
	case 'p':		/* PasswordMessage */
		/* haven't requested it */
		if (cf_auth_type <= AUTH_TRUST) {
			disconnect_client(client, true, "unrequested passwd pkt");
			return false;
		}

		ok = mbuf_get_string(&pkt->data, &passwd);
		if (ok && check_client_passwd(client, passwd)) {
			if (!finish_client_login(client))
				return false;
		} else {
			disconnect_client(client, true, "Auth failed");
			return false;
		}
		break;
	case PKT_CANCEL:
		/* cancel payload is exactly the backend key of the session
		 * to cancel */
		if (mbuf_avail_for_read(&pkt->data) == BACKENDKEY_LEN
		    && mbuf_get_bytes(&pkt->data, BACKENDKEY_LEN, &key))
		{
			memcpy(client->cancel_key, key, BACKENDKEY_LEN);
			accept_cancel_request(client);
		} else
			disconnect_client(client, false, "bad cancel request");
		return false;
	default:
		disconnect_client(client, false, "bad packet");
		return false;
	}
	sbuf_prepare_skip(sbuf, pkt->len);
	client->request_time = get_cached_time();
	return true;
}
/* decide on packets of logged-in client */
/* Handle one protocol packet from a logged-in client: account stats,
 * acquire a server connection if needed, and forward the packet.
 * Returns false if the client was disconnected or must wait for a
 * server. */
static bool handle_client_work(PgSocket *client, PktHdr *pkt)
{
	SBuf *sbuf = &client->sbuf;

	switch (pkt->type) {

	/* one-packet queries */
	case 'Q':		/* Query */
		if (cf_disable_pqexec) {
			slog_error(client, "Client used 'Q' packet type.");
			disconnect_client(client, true, "PQexec disallowed");
			return false;
		}
		/* fallthrough */
	case 'F':		/* FunctionCall */

	/* request immediate response from server */
	case 'H':		/* Flush */
	case 'S':		/* Sync */

	/* copy end markers */
	case 'c':		/* CopyDone(F/B) */
	case 'f':		/* CopyFail(F/B) */

	/*
	 * extended protocol allows server (and thus pooler)
	 * to buffer packets until sync or flush is sent by client
	 */
	case 'P':		/* Parse */
	case 'E':		/* Execute */
	case 'C':		/* Close */
	case 'B':		/* Bind */
	case 'D':		/* Describe */
	case 'd':		/* CopyData(F/B) */

		/* update stats */
		/* query_start == 0 marks the first packet of a new request */
		if (!client->query_start) {
			client->pool->stats.request_count++;
			client->query_start = get_cached_time();
		}

		/* admin console traffic never touches real servers */
		if (client->pool->db->admin)
			return admin_handle_client(client, pkt);

		/* acquire server */
		if (!find_server(client))
			return false;

		client->pool->stats.client_bytes += pkt->len;

		/* tag the server as dirty */
		client->link->ready = false;
		client->link->idle_tx = false;

		/* forward the packet */
		sbuf_prepare_send(sbuf, &client->link->sbuf, pkt->len);
		break;

	/* client wants to go away */
	default:
		slog_error(client, "unknown pkt from client: %d/0x%x", pkt->type, pkt->type);
		disconnect_client(client, true, "unknown pkt");
		return false;
	case 'X': /* Terminate */
		disconnect_client(client, false, "client close request");
		return false;
	}
	return true;
}
/* callback from SBuf */
/* SBuf event callback for client sockets: dispatches socket events and
 * parsed packet headers to the startup or work handlers depending on
 * the client's state.  Returns whether the event was consumed. */
bool client_proto(SBuf *sbuf, SBufEvent evtype, struct MBuf *data)
{
	bool res = false;
	PgSocket *client = container_of(sbuf, PgSocket, sbuf);
	PktHdr pkt;


	Assert(!is_server_socket(client));
	Assert(client->sbuf.sock);
	Assert(client->state != CL_FREE);

	/* may happen if close failed */
	if (client->state == CL_JUSTFREE)
		return false;

	switch (evtype) {
	case SBUF_EV_CONNECT_OK:
	case SBUF_EV_CONNECT_FAILED:
		/* ^ those should not happen */
	case SBUF_EV_RECV_FAILED:
		disconnect_client(client, false, "client unexpected eof");
		break;
	case SBUF_EV_SEND_FAILED:
		disconnect_server(client->link, false, "Server connection closed");
		break;
	case SBUF_EV_READ:
		/* during login the V2/cancel packets use a different,
		 * longer header, so only short-circuit after login */
		if (mbuf_avail_for_read(data) < NEW_HEADER_LEN && client->state != CL_LOGIN) {
			slog_noise(client, "C: got partial header, trying to wait a bit");
			return false;
		}

		if (!get_header(data, &pkt)) {
			char hex[8*2 + 1];
			disconnect_client(client, true, "bad packet header: '%s'",
					  hdr2hex(data, hex, sizeof(hex)));
			return false;
		}
		slog_noise(client, "pkt='%c' len=%d", pkt_desc(&pkt), pkt.len);

		client->request_time = get_cached_time();
		switch (client->state) {
		case CL_LOGIN:
			res = handle_client_startup(client, &pkt);
			break;
		case CL_ACTIVE:
			if (client->wait_for_welcome)
				res = handle_client_startup(client, &pkt);
			else
				res = handle_client_work(client, &pkt);
			break;
		case CL_WAITING:
			fatal("why waiting client in client_proto()");
		default:
			fatal("bad client state: %d", client->state);
		}
		break;
	case SBUF_EV_FLUSH:
		/* client is not interested in it */
		break;
	case SBUF_EV_PKT_CALLBACK:
		/* unused ATM */
		break;
	}
	return res;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_1621_0 |
crossvul-cpp_data_good_1060_0 | /*
pf.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
Under the terms of the GNU General Public License.
This is the high-level driver for parallel port ATAPI disk
drives based on chips supported by the paride module.
By default, the driver will autoprobe for a single parallel
port ATAPI disk drive, but if their individual parameters are
specified, the driver can handle up to 4 drives.
The behaviour of the pf driver can be altered by setting
some parameters from the insmod command line. The following
parameters are adjustable:
drive0 These four arguments can be arrays of
drive1 1-7 integers as follows:
drive2
drive3 <prt>,<pro>,<uni>,<mod>,<slv>,<lun>,<dly>
Where,
<prt> is the base of the parallel port address for
the corresponding drive. (required)
<pro> is the protocol number for the adapter that
supports this drive. These numbers are
logged by 'paride' when the protocol modules
are initialised. (0 if not given)
<uni> for those adapters that support chained
devices, this is the unit selector for the
chain of devices on the given port. It should
be zero for devices that don't support chaining.
(0 if not given)
<mod> this can be -1 to choose the best mode, or one
of the mode numbers supported by the adapter.
(-1 if not given)
<slv> ATAPI CDroms can be jumpered to master or slave.
Set this to 0 to choose the master drive, 1 to
choose the slave, -1 (the default) to choose the
first drive found.
<lun> Some ATAPI devices support multiple LUNs.
One example is the ATAPI PD/CD drive from
Matshita/Panasonic. This device has a
CD drive on LUN 0 and a PD drive on LUN 1.
By default, the driver will search for the
first LUN with a supported device. Set
this parameter to force it to use a specific
LUN. (default -1)
<dly> some parallel ports require the driver to
go more slowly. -1 sets a default value that
should work with the chosen protocol. Otherwise,
set this to a small integer, the larger it is
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
major You may use this parameter to override the
default major number (47) that this driver
will use. Be sure to change the device
name as well.
name This parameter is a character string that
contains the name the kernel will use for this
device (in /proc output, for instance).
(default "pf").
cluster The driver will attempt to aggregate requests
for adjacent blocks into larger multi-block
clusters. The maximum cluster size (in 512
byte sectors) is set with this parameter.
(default 64)
verbose This parameter controls the amount of logging
that the driver will do. Set it to 0 for
normal operation, 1 to see autoprobe progress
messages, or 2 to see additional debugging
output. (default 0)
nice This parameter controls the driver's use of
idle CPU time, at the expense of some speed.
If this driver is built into the kernel, you can use the
following command line parameters, with the same values
as the corresponding module parameters listed above:
pf.drive0
pf.drive1
pf.drive2
pf.drive3
pf.cluster
pf.nice
In addition, you can use the parameter pf.disable to disable
the driver entirely.
*/
/* Changes:
1.01 GRG 1998.05.03 Changes for SMP. Eliminate sti().
Fix for drives that don't clear STAT_ERR
until after next CDB delivered.
Small change in pf_completion to round
up transfer size.
1.02 GRG 1998.06.16 Eliminated an Ugh
1.03 GRG 1998.08.16 Use HZ in loop timings, extra debugging
1.04 GRG 1998.09.24 Added jumbo support
*/
#define PF_VERSION "1.04"
#define PF_MAJOR 47
#define PF_NAME "pf"
#define PF_UNITS 4
#include <linux/types.h>
/* Here are things one can override from the insmod command.
Most are autoprobed by paride unless set here. Verbose is off
by default.
*/
static bool verbose = 0;
static int major = PF_MAJOR;
static char *name = PF_NAME;
static int cluster = 64;
static int nice = 0;
static int disable = 0;
static int drive0[7] = { 0, 0, 0, -1, -1, -1, -1 };
static int drive1[7] = { 0, 0, 0, -1, -1, -1, -1 };
static int drive2[7] = { 0, 0, 0, -1, -1, -1, -1 };
static int drive3[7] = { 0, 0, 0, -1, -1, -1, -1 };
static int (*drives[4])[7] = {&drive0, &drive1, &drive2, &drive3};
static int pf_drive_count;
enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
/* end of parameters */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/spinlock.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
static DEFINE_MUTEX(pf_mutex);
static DEFINE_SPINLOCK(pf_spin_lock);
module_param(verbose, bool, 0644);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
module_param(nice, int, 0);
module_param_array(drive0, int, NULL, 0);
module_param_array(drive1, int, NULL, 0);
module_param_array(drive2, int, NULL, 0);
module_param_array(drive3, int, NULL, 0);
#include "paride.h"
#include "pseudo.h"
/* constants for faking geometry numbers */
#define PF_FD_MAX 8192 /* use FD geometry under this size */
#define PF_FD_HDS 2
#define PF_FD_SPT 18
#define PF_HD_HDS 64
#define PF_HD_SPT 32
#define PF_MAX_RETRIES 5
#define PF_TMO 800 /* interrupt timeout in jiffies */
#define PF_SPIN_DEL 50 /* spin delay in micro-seconds */
#define PF_SPIN (1000000*PF_TMO)/(HZ*PF_SPIN_DEL)
#define STAT_ERR 0x00001
#define STAT_INDEX 0x00002
#define STAT_ECC 0x00004
#define STAT_DRQ 0x00008
#define STAT_SEEK 0x00010
#define STAT_WRERR 0x00020
#define STAT_READY 0x00040
#define STAT_BUSY 0x00080
#define ATAPI_REQ_SENSE 0x03
#define ATAPI_LOCK 0x1e
#define ATAPI_DOOR 0x1b
#define ATAPI_MODE_SENSE 0x5a
#define ATAPI_CAPACITY 0x25
#define ATAPI_IDENTIFY 0x12
#define ATAPI_READ_10 0x28
#define ATAPI_WRITE_10 0x2a
static int pf_open(struct block_device *bdev, fmode_t mode);
static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd);
static int pf_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static void pf_release(struct gendisk *disk, fmode_t mode);
static int pf_detect(void);
static void do_pf_read(void);
static void do_pf_read_start(void);
static void do_pf_write(void);
static void do_pf_write_start(void);
static void do_pf_read_drq(void);
static void do_pf_write_done(void);
#define PF_NM 0
#define PF_RO 1
#define PF_RW 2
#define PF_NAMELEN 8
struct pf_unit {
struct pi_adapter pia; /* interface to paride layer */
struct pi_adapter *pi;
int removable; /* removable media device ? */
int media_status; /* media present ? WP ? */
int drive; /* drive */
int lun;
int access; /* count of active opens ... */
int present; /* device present ? */
char name[PF_NAMELEN]; /* pf0, pf1, ... */
struct gendisk *disk;
struct blk_mq_tag_set tag_set;
struct list_head rq_list;
};
static struct pf_unit units[PF_UNITS];
static int pf_identify(struct pf_unit *pf);
static void pf_lock(struct pf_unit *pf, int func);
static void pf_eject(struct pf_unit *pf);
static unsigned int pf_check_events(struct gendisk *disk,
unsigned int clearing);
static char pf_scratch[512]; /* scratch block buffer */
/* the variables below are used mainly in the I/O request engine, which
processes only one request at a time.
*/
static int pf_retries = 0; /* i/o error retry count */
static int pf_busy = 0; /* request being processed ? */
static struct request *pf_req; /* current request */
static int pf_block; /* address of next requested block */
static int pf_count; /* number of blocks still to do */
static int pf_run; /* sectors in current cluster */
static int pf_cmd; /* current command READ/WRITE */
static struct pf_unit *pf_current;/* unit of current request */
static int pf_mask; /* stopper for pseudo-int */
static char *pf_buf; /* buffer for request in progress */
static void *par_drv; /* reference of parport driver */
/* kernel glue structures */
static const struct block_device_operations pf_fops = {
.owner = THIS_MODULE,
.open = pf_open,
.release = pf_release,
.ioctl = pf_ioctl,
.getgeo = pf_getgeo,
.check_events = pf_check_events,
};
static const struct blk_mq_ops pf_mq_ops = {
.queue_rq = pf_queue_rq,
};
/*
 * Allocate a gendisk and a single-hw-queue blk-mq queue for each of the
 * PF_UNITS slots and fill in per-unit defaults from the module parameters.
 * Units whose allocations fail are left with a NULL ->disk and are skipped
 * by the rest of the driver.
 *
 * Fix: on blk_mq_init_sq_queue() failure, the stale ERR_PTR must be
 * cleared from disk->queue *before* put_disk() - put_disk() drops the
 * only reference taken by alloc_disk() and may free the gendisk, so the
 * original order (put_disk() first, then "disk->queue = NULL") wrote
 * into freed memory.
 */
static void __init pf_init_units(void)
{
	struct pf_unit *pf;
	int unit;

	pf_drive_count = 0;
	for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
		struct gendisk *disk;

		disk = alloc_disk(1);
		if (!disk)
			continue;

		disk->queue = blk_mq_init_sq_queue(&pf->tag_set, &pf_mq_ops,
						   1, BLK_MQ_F_SHOULD_MERGE);
		if (IS_ERR(disk->queue)) {
			/* clear the ERR_PTR while the disk is still alive */
			disk->queue = NULL;
			put_disk(disk);
			continue;
		}

		INIT_LIST_HEAD(&pf->rq_list);
		disk->queue->queuedata = pf;
		blk_queue_max_segments(disk->queue, cluster);
		blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
		pf->disk = disk;
		pf->pi = &pf->pia;
		pf->media_status = PF_NM;	/* assume no media until probed */
		pf->drive = (*drives[unit])[D_SLV];
		pf->lun = (*drives[unit])[D_LUN];
		snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit);
		disk->major = major;
		disk->first_minor = unit;
		strcpy(disk->disk_name, pf->name);
		disk->fops = &pf_fops;
		/* count unconfigured units so autoprobe knows whether to run */
		if (!(*drives[unit])[D_PRT])
			pf_drive_count++;
	}
}
/*
 * Block device ->open: re-probe the media, then refuse the open when no
 * medium is present (-ENODEV) or a read-only medium is opened for write
 * (-EROFS).  On success, locks the door of removable drives.
 */
static int pf_open(struct block_device *bdev, fmode_t mode)
{
	struct pf_unit *pf = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&pf_mutex);
	pf_identify(pf);	/* refresh media/WP status and capacity */

	ret = -ENODEV;
	if (pf->media_status == PF_NM)
		goto out;

	ret = -EROFS;
	if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE))
		goto out;

	ret = 0;
	pf->access++;
	if (pf->removable)
		pf_lock(pf, 1);	/* prevent manual eject while open */
out:
	mutex_unlock(&pf_mutex);
	return ret;
}
/*
 * Block device ->getgeo: fake a CHS geometry - floppy-style for small
 * media (< PF_FD_MAX sectors), hard-disk-style otherwise.
 *
 * NOTE(review): sector_div(n, base) divides @n in place and *returns the
 * remainder*, so ->cylinders is assigned the remainder rather than the
 * quotient left behind in @capacity.  This matches long-standing upstream
 * code but looks suspicious - confirm intent before relying on the value.
 */
static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct pf_unit *pf = bdev->bd_disk->private_data;
	sector_t capacity = get_capacity(pf->disk);

	if (capacity < PF_FD_MAX) {
		geo->cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT);
		geo->heads = PF_FD_HDS;
		geo->sectors = PF_FD_SPT;
	} else {
		geo->cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT);
		geo->heads = PF_HD_HDS;
		geo->sectors = PF_HD_SPT;
	}
	return 0;
}
/*
 * Block device ->ioctl: only CDROMEJECT is supported, and only while
 * exactly one opener holds the device.
 */
static int pf_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pf_unit *pf = bdev->bd_disk->private_data;

	if (cmd != CDROMEJECT)
		return -EINVAL;
	/* NOTE(review): ->access is read before pf_mutex is taken - racy
	 * against a concurrent open/release; confirm whether that matters */
	if (pf->access != 1)
		return -EBUSY;
	mutex_lock(&pf_mutex);
	pf_eject(pf);
	mutex_unlock(&pf_mutex);
	return 0;
}

/*
 * Block device ->release: drop one open reference; on last close of a
 * removable drive, unlock the door again.
 */
static void pf_release(struct gendisk *disk, fmode_t mode)
{
	struct pf_unit *pf = disk->private_data;

	mutex_lock(&pf_mutex);
	if (pf->access <= 0) {
		/* unbalanced release - warn and bail instead of going negative */
		mutex_unlock(&pf_mutex);
		WARN_ON(1);
		return;
	}

	pf->access--;

	if (!pf->access && pf->removable)
		pf_lock(pf, 0);

	mutex_unlock(&pf_mutex);
}

/* No media-change notification available: always report a possible change. */
static unsigned int pf_check_events(struct gendisk *disk, unsigned int clearing)
{
	return DISK_EVENT_MEDIA_CHANGE;
}

/* Read the alternate status register (register bank 1, reg 6). */
static inline int status_reg(struct pf_unit *pf)
{
	return pi_read_regr(pf->pi, 1, 6);
}

/* Read an ATA task-file register (bank 0). */
static inline int read_reg(struct pf_unit *pf, int reg)
{
	return pi_read_regr(pf->pi, 0, reg);
}

/* Write an ATA task-file register (bank 0). */
static inline void write_reg(struct pf_unit *pf, int reg, int val)
{
	pi_write_regr(pf->pi, 0, reg, val);
}
/*
 * Busy-wait until all @go status bits are clear and (if @stop is nonzero)
 * at least one @stop bit is set, or until the PF_SPIN poll budget runs out.
 *
 * Returns 0 on success; otherwise (error << 8) + status, with bit 0x100
 * of the error byte flagging a timeout.  @fun/@msg are used only for the
 * diagnostic printk (@fun == NULL suppresses it).
 */
static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg)
{
	int j, r, e, s, p;

	j = 0;
	while ((((r = status_reg(pf)) & go) || (stop && (!(r & stop))))
	       && (j++ < PF_SPIN))
		udelay(PF_SPIN_DEL);

	/* report if an error bit we were waiting on is set, or we timed out */
	if ((r & (STAT_ERR & stop)) || (j > PF_SPIN)) {
		s = read_reg(pf, 7);
		e = read_reg(pf, 1);
		p = read_reg(pf, 2);
		if (j > PF_SPIN)
			e |= 0x100;	/* mark timeout in the error byte */
		if (fun)
			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
			       " loop=%d phase=%d\n",
			       pf->name, fun, msg, r, s, e, j, p);
		return (e << 8) + s;
	}
	return 0;
}
/*
 * Issue a 12-byte ATAPI packet command: select the drive, program the
 * expected transfer length, send the ATAPI PACKET opcode, wait for the
 * command-phase DRQ and push the CDB.  Leaves the adapter connected on
 * success (caller must eventually pi_disconnect()); returns -1 on failure
 * with the adapter disconnected.
 */
static int pf_command(struct pf_unit *pf, char *cmd, int dlen, char *fun)
{
	pi_connect(pf->pi);

	write_reg(pf, 6, 0xa0+0x10*pf->drive);	/* drive select */

	if (pf_wait(pf, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) {
		pi_disconnect(pf->pi);
		return -1;
	}

	/* byte count limit for this transfer */
	write_reg(pf, 4, dlen % 256);
	write_reg(pf, 5, dlen / 256);
	write_reg(pf, 7, 0xa0);	/* ATAPI packet command */

	if (pf_wait(pf, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) {
		pi_disconnect(pf->pi);
		return -1;
	}

	/* interrupt-reason register must indicate the command phase */
	if (read_reg(pf, 2) != 1) {
		printk("%s: %s: command phase error\n", pf->name, fun);
		pi_disconnect(pf->pi);
		return -1;
	}

	pi_write_block(pf->pi, cmd, 12);

	return 0;
}

/*
 * Finish an ATAPI command started by pf_command(): read back any data the
 * device offers (length rounded up to a multiple of 4), wait for the
 * final ready/error status and disconnect.  Returns 0 or a pf_wait()
 * error code.
 */
static int pf_completion(struct pf_unit *pf, char *buf, char *fun)
{
	int r, s, n;

	r = pf_wait(pf, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
		    fun, "completion");

	if ((read_reg(pf, 2) & 2) && (read_reg(pf, 7) & STAT_DRQ)) {
		n = (((read_reg(pf, 4) + 256 * read_reg(pf, 5)) +
		      3) & 0xfffc);	/* round transfer size up */
		pi_read_block(pf->pi, buf, n);
	}

	s = pf_wait(pf, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done");

	pi_disconnect(pf->pi);

	return (r ? r : s);
}
/*
 * Issue an ATAPI REQUEST SENSE to fetch and (unless @quiet) log the
 * sense key / ASC / ASCQ after a failed command.
 */
static void pf_req_sense(struct pf_unit *pf, int quiet)
{
	char rs_cmd[12] =
	    { ATAPI_REQ_SENSE, pf->lun << 5, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
	char buf[16];
	int r;

	r = pf_command(pf, rs_cmd, 16, "Request sense");
	mdelay(1);
	if (!r)
		pf_completion(pf, buf, "Request sense");

	if ((!r) && (!quiet))
		printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n",
		       pf->name, buf[2] & 0xf, buf[12], buf[13]);
}

/*
 * Run one complete ATAPI command (send CDB, finish transfer); on any
 * failure, fetch sense data.  @fun == NULL makes the sense fetch quiet.
 * Returns 0 on success or the pf_command/pf_completion error code.
 */
static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fun)
{
	int r;

	r = pf_command(pf, cmd, dlen, fun);
	mdelay(1);
	if (!r)
		r = pf_completion(pf, buf, fun);
	if (r)
		pf_req_sense(pf, !fun);

	return r;
}

/* PREVENT/ALLOW MEDIUM REMOVAL: func != 0 locks the door, 0 unlocks. */
static void pf_lock(struct pf_unit *pf, int func)
{
	char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 };

	pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock");
}

/* Unlock the door and eject the medium (START STOP UNIT, LoEj). */
static void pf_eject(struct pf_unit *pf)
{
	char ej_cmd[12] = { ATAPI_DOOR, pf->lun << 5, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 };

	pf_lock(pf, 0);
	pf_atapi(pf, ej_cmd, 0, pf_scratch, "eject");
}

#define PF_RESET_TMO   30	/* in tenths of a second */

/* Interruptible sleep for @cs jiffies. */
static void pf_sleep(int cs)
{
	schedule_timeout_interruptible(cs);
}
/* the ATAPI standard actually specifies the contents of all 7 registers
   after a reset, but the specification is ambiguous concerning the last
   two bytes, and different drives interpret the standard differently.
 */

/*
 * Soft-reset the selected drive and verify the post-reset register
 * signature.  Returns 0 when the signature matches, -1 otherwise.
 */
static int pf_reset(struct pf_unit *pf)
{
	int i, k, flg;
	int expect[5] = { 1, 1, 1, 0x14, 0xeb };	/* ATAPI signature */

	pi_connect(pf->pi);
	write_reg(pf, 6, 0xa0+0x10*pf->drive);
	write_reg(pf, 7, 8);	/* DEVICE RESET */

	pf_sleep(20 * HZ / 1000);

	/* wait (up to PF_RESET_TMO tenths of a second) for BSY to clear */
	k = 0;
	while ((k++ < PF_RESET_TMO) && (status_reg(pf) & STAT_BUSY))
		pf_sleep(HZ / 10);

	flg = 1;
	for (i = 0; i < 5; i++)
		flg &= (read_reg(pf, i + 1) == expect[i]);

	if (verbose) {
		printk("%s: Reset (%d) signature = ", pf->name, k);
		for (i = 0; i < 5; i++)
			printk("%3x", read_reg(pf, i + 1));
		if (!flg)
			printk(" (incorrect)");
		printk("\n");
	}

	pi_disconnect(pf->pi);
	return flg - 1;		/* 0 on match, -1 on mismatch */
}

/*
 * MODE SENSE(10): derive the media write-protect state.  Bit 7 of the
 * device-specific parameter byte (buf[3]) marks write-protected media.
 */
static void pf_mode_sense(struct pf_unit *pf)
{
	char ms_cmd[12] =
	    { ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 };
	char buf[8];

	pf_atapi(pf, ms_cmd, 8, buf, "mode sense");
	pf->media_status = PF_RW;
	if (buf[3] & 0x80)
		pf->media_status = PF_RO;
}
/*
 * Copy @len bytes starting at buf[offs] into the NUL-terminated string
 * @targ, collapsing each run of spaces to a single space and dropping a
 * trailing space.  Used to tidy fixed-width ATAPI identify fields.
 */
static void xs(char *buf, char *targ, int offs, int len)
{
	int in, out = 0;
	char prev = 0;

	for (in = 0; in < len; in++) {
		char c = buf[in + offs];

		/* skip a space that directly follows a copied space */
		if (c != 0x20 || prev != 0x20) {
			targ[out++] = c;
			prev = c;
		}
	}
	if (prev == 0x20)
		out--;	/* strip the single trailing space, if any */
	targ[out] = 0;
}
/*
 * Decode a 32-bit big-endian value starting at buf[offs].
 *
 * Fix: accumulate in an unsigned int - the original signed accumulation
 * (v = v * 256 + ...) is undefined behavior when the encoded value is
 * >= 2^31 (e.g. very large capacities).  Values below 2^31 are returned
 * unchanged; larger values now wrap in an implementation-defined (but
 * not undefined) way on conversion back to int.
 */
static int xl(char *buf, int offs)
{
	unsigned int v = 0;
	int k;

	for (k = 0; k < 4; k++)
		v = v * 256 + (buf[k + offs] & 0xff);
	return v;
}
/*
 * READ CAPACITY: set the gendisk capacity from the last-LBA field.
 * Media with a block size other than 512 bytes are rejected (capacity 0);
 * a failed command marks the unit as having no media.
 */
static void pf_get_capacity(struct pf_unit *pf)
{
	char rc_cmd[12] = { ATAPI_CAPACITY, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
	char buf[8];
	int bs;

	if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) {
		pf->media_status = PF_NM;
		return;
	}
	set_capacity(pf->disk, xl(buf, 0) + 1);	/* last LBA -> block count */
	bs = xl(buf, 4);
	if (bs != 512) {
		set_capacity(pf->disk, 0);
		if (verbose)
			printk("%s: Drive %d, LUN %d,"
			       " unsupported block size %d\n",
			       pf->name, pf->drive, pf->lun, bs);
	}
}

/*
 * ATAPI INQUIRY: identify the device, accept only direct-access (0) and
 * optical-memory (7) types, then refresh WP status and capacity and log
 * a summary line.  Returns 0 on success, -1 on failure/unsupported type.
 */
static int pf_identify(struct pf_unit *pf)
{
	int dt, s;
	char *ms[2] = { "master", "slave" };
	char mf[10], id[18];
	char id_cmd[12] =
	    { ATAPI_IDENTIFY, pf->lun << 5, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
	char buf[36];

	s = pf_atapi(pf, id_cmd, 36, buf, "identify");
	if (s)
		return -1;

	dt = buf[0] & 0x1f;	/* peripheral device type */
	if ((dt != 0) && (dt != 7)) {
		if (verbose)
			printk("%s: Drive %d, LUN %d, unsupported type %d\n",
			       pf->name, pf->drive, pf->lun, dt);
		return -1;
	}

	/* squeeze vendor and product strings out of the inquiry data */
	xs(buf, mf, 8, 8);
	xs(buf, id, 16, 16);

	pf->removable = (buf[1] & 0x80);

	/* repeated on purpose - some drives need a few tries to settle;
	 * presumably a quirk workaround, confirm against drive behaviour */
	pf_mode_sense(pf);
	pf_mode_sense(pf);
	pf_mode_sense(pf);

	pf_get_capacity(pf);

	printk("%s: %s %s, %s LUN %d, type %d",
	       pf->name, mf, id, ms[pf->drive], pf->lun, dt);
	if (pf->removable)
		printk(", removable");
	if (pf->media_status == PF_NM)
		printk(", no media\n");
	else {
		if (pf->media_status == PF_RO)
			printk(", RO");
		printk(", %llu blocks\n",
			(unsigned long long)get_capacity(pf->disk));
	}
	return 0;
}
/* returns 0, with id set if drive is detected
	   -1, if drive detection failed
*/

/*
 * Probe for a usable device on this unit.  With drive/LUN unspecified
 * (-1) every master/slave and LUN 0..7 combination is tried; the first
 * one that resets and identifies cleanly wins.
 */
static int pf_probe(struct pf_unit *pf)
{
	if (pf->drive == -1) {
		for (pf->drive = 0; pf->drive <= 1; pf->drive++)
			if (!pf_reset(pf)) {
				if (pf->lun != -1)
					return pf_identify(pf);
				else
					for (pf->lun = 0; pf->lun < 8; pf->lun++)
						if (!pf_identify(pf))
							return 0;
			}
	} else {
		if (pf_reset(pf))
			return -1;
		if (pf->lun != -1)
			return pf_identify(pf);
		for (pf->lun = 0; pf->lun < 8; pf->lun++)
			if (!pf_identify(pf))
				return 0;
	}
	return -1;
}

/*
 * Detect drives: either autoprobe unit 0 (no drives configured) or probe
 * each explicitly configured unit.  Returns 0 if at least one drive was
 * found; otherwise tears down every allocated disk/queue and returns -1.
 */
static int pf_detect(void)
{
	struct pf_unit *pf = units;
	int k, unit;

	printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
	       name, name, PF_VERSION, major, cluster, nice);

	par_drv = pi_register_driver(name);
	if (!par_drv) {
		pr_err("failed to register %s driver\n", name);
		return -1;
	}

	k = 0;
	if (pf_drive_count == 0) {
		/* autoprobe: only the first unit */
		if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF,
			    verbose, pf->name)) {
			/* the ->disk check guards against units whose
			 * allocation failed in pf_init_units() */
			if (!pf_probe(pf) && pf->disk) {
				pf->present = 1;
				k++;
			} else
				pi_release(pf->pi);
		}
	} else
		for (unit = 0; unit < PF_UNITS; unit++, pf++) {
			int *conf = *drives[unit];
			if (!conf[D_PRT])
				continue;	/* unit not configured */
			if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD],
				    conf[D_UNI], conf[D_PRO], conf[D_DLY],
				    pf_scratch, PI_PF, verbose, pf->name)) {
				if (pf->disk && !pf_probe(pf)) {
					pf->present = 1;
					k++;
				} else
					pi_release(pf->pi);
			}
		}
	if (k)
		return 0;

	/* nothing found: release all per-unit block layer resources */
	printk("%s: No ATAPI disk detected\n", name);
	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
		if (!pf->disk)
			continue;
		blk_cleanup_queue(pf->disk->queue);
		pf->disk->queue = NULL;
		blk_mq_free_tag_set(&pf->tag_set);
		put_disk(pf->disk);
	}
	pi_unregister_driver(par_drv);
	return -1;
}
/* The i/o request engine - processes exactly one request at a time,
 * serialised by pf_spin_lock and the pf_busy flag. */

/*
 * Build and issue an ATAPI READ(10)/WRITE(10) CDB for @c sectors starting
 * at LBA @b.  Returns the pf_command() result.
 */
static int pf_start(struct pf_unit *pf, int cmd, int b, int c)
{
	int i;
	char io_cmd[12] = { cmd, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

	/* big-endian 32-bit LBA into bytes 2..5 */
	for (i = 0; i < 4; i++) {
		io_cmd[5 - i] = b & 0xff;
		b = b >> 8;
	}

	/* big-endian 16-bit transfer length into bytes 7..8 */
	io_cmd[8] = c & 0xff;
	io_cmd[7] = (c >> 8) & 0xff;

	i = pf_command(pf, io_cmd, c * 512, "start i/o");

	mdelay(1);

	return i;
}

/* Pseudo-interrupt poll predicate: BSY clear and the awaited bits set. */
static int pf_ready(void)
{
	return (((status_reg(pf_current) & (STAT_BUSY | pf_mask)) == pf_mask));
}

static int pf_queue;	/* round-robin cursor over units */

/*
 * Pick the next queued request, round-robin across present units.
 * Starts the chosen request and stores it in pf_req; returns non-zero
 * when a request is available.  Caller holds pf_spin_lock.
 */
static int set_next_request(void)
{
	struct pf_unit *pf;
	int old_pos = pf_queue;

	do {
		pf = &units[pf_queue];
		if (++pf_queue == PF_UNITS)
			pf_queue = 0;
		if (pf->present && !list_empty(&pf->rq_list)) {
			pf_req = list_first_entry(&pf->rq_list, struct request,
							queuelist);
			list_del_init(&pf_req->queuelist);
			blk_mq_start_request(pf_req);
			break;
		}
	} while (pf_queue != old_pos);

	return pf_req != NULL;
}

/*
 * Complete the current bio segment of pf_req with status @err; when the
 * whole request is done, end it and clear pf_req.  Caller holds
 * pf_spin_lock.
 */
static void pf_end_request(blk_status_t err)
{
	if (!pf_req)
		return;
	if (!blk_update_request(pf_req, err, blk_rq_cur_bytes(pf_req))) {
		__blk_mq_end_request(pf_req, err);
		pf_req = NULL;
	}
}

/*
 * Start servicing the next request unless one is already in flight.
 * Validates the range against the disk capacity, then hands the transfer
 * to the claimed-port read/write state machine.  Caller holds
 * pf_spin_lock.
 */
static void pf_request(void)
{
	if (pf_busy)
		return;
repeat:
	if (!pf_req && !set_next_request())
		return;

	pf_current = pf_req->rq_disk->private_data;
	pf_block = blk_rq_pos(pf_req);
	pf_run = blk_rq_sectors(pf_req);
	pf_count = blk_rq_cur_sectors(pf_req);

	if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
		pf_end_request(BLK_STS_IOERR);
		goto repeat;
	}

	pf_cmd = rq_data_dir(pf_req);
	pf_buf = bio_data(pf_req->bio);
	pf_retries = 0;

	pf_busy = 1;
	if (pf_cmd == READ)
		pi_do_claimed(pf_current->pi, do_pf_read);
	else if (pf_cmd == WRITE)
		pi_do_claimed(pf_current->pi, do_pf_write);
	else {
		pf_busy = 0;
		pf_end_request(BLK_STS_IOERR);
		goto repeat;
	}
}

/* blk-mq ->queue_rq: park the request on the per-unit list and kick the engine. */
static blk_status_t pf_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct pf_unit *pf = hctx->queue->queuedata;

	spin_lock_irq(&pf_spin_lock);
	list_add_tail(&bd->rq->queuelist, &pf->rq_list);
	pf_request();
	spin_unlock_irq(&pf_spin_lock);

	return BLK_STS_OK;
}
/*
 * Advance to the next 512-byte sector of the current transfer.  Returns
 * 1 when the whole cluster is done (or the request ended), 0 when more
 * sectors remain.  Crosses bio-segment boundaries by completing the
 * current segment and refreshing pf_count/pf_buf.
 */
static int pf_next_buf(void)
{
	unsigned long saved_flags;

	pf_count--;
	pf_run--;
	pf_buf += 512;
	pf_block++;
	if (!pf_run)
		return 1;
	if (!pf_count) {
		spin_lock_irqsave(&pf_spin_lock, saved_flags);
		pf_end_request(0);	/* 0 == BLK_STS_OK */
		spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
		if (!pf_req)
			return 1;
		pf_count = blk_rq_cur_sectors(pf_req);
		pf_buf = bio_data(pf_req->bio);
	}
	return 0;
}

/*
 * Finish the current request with status @err, release the engine and
 * immediately try to start the next queued request.
 */
static inline void next_request(blk_status_t err)
{
	unsigned long saved_flags;

	spin_lock_irqsave(&pf_spin_lock, saved_flags);
	pf_end_request(err);
	pf_busy = 0;
	pf_request();
	spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
}
/* detach from the calling context - in case the spinlock is held */
static void do_pf_read(void)
{
	ps_set_intr(do_pf_read_start, NULL, 0, nice);
}

/*
 * Issue the READ(10) for the current cluster; retry up to PF_MAX_RETRIES
 * times before failing the request.  On success, arm the pseudo-interrupt
 * to call do_pf_read_drq() once DRQ asserts.
 */
static void do_pf_read_start(void)
{
	pf_busy = 1;

	if (pf_start(pf_current, ATAPI_READ_10, pf_block, pf_run)) {
		pi_disconnect(pf_current->pi);
		if (pf_retries < PF_MAX_RETRIES) {
			pf_retries++;
			pi_do_claimed(pf_current->pi, do_pf_read_start);
			return;
		}
		next_request(BLK_STS_IOERR);
		return;
	}
	pf_mask = STAT_DRQ;
	ps_set_intr(do_pf_read_drq, pf_ready, PF_TMO, nice);
}

/*
 * DRQ handler for reads: pull sectors one at a time until the cluster is
 * done, restarting the whole cluster (with request sense) on any error.
 */
static void do_pf_read_drq(void)
{
	while (1) {
		if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
			    "read block", "completion") & STAT_ERR) {
			pi_disconnect(pf_current->pi);
			if (pf_retries < PF_MAX_RETRIES) {
				pf_req_sense(pf_current, 0);
				pf_retries++;
				pi_do_claimed(pf_current->pi, do_pf_read_start);
				return;
			}
			next_request(BLK_STS_IOERR);
			return;
		}
		pi_read_block(pf_current->pi, pf_buf, 512);
		if (pf_next_buf())
			break;
	}
	pi_disconnect(pf_current->pi);
	next_request(0);
}
/* detach from the calling context - in case the spinlock is held */
static void do_pf_write(void)
{
	ps_set_intr(do_pf_write_start, NULL, 0, nice);
}

/*
 * Issue the WRITE(10) and push all sectors of the cluster; on any error
 * retry the whole cluster up to PF_MAX_RETRIES times.  Once the data is
 * transferred, wait (via pseudo-interrupt) for the drive to finish.
 */
static void do_pf_write_start(void)
{
	pf_busy = 1;

	if (pf_start(pf_current, ATAPI_WRITE_10, pf_block, pf_run)) {
		pi_disconnect(pf_current->pi);
		if (pf_retries < PF_MAX_RETRIES) {
			pf_retries++;
			pi_do_claimed(pf_current->pi, do_pf_write_start);
			return;
		}
		next_request(BLK_STS_IOERR);
		return;
	}

	while (1) {
		if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
			    "write block", "data wait") & STAT_ERR) {
			pi_disconnect(pf_current->pi);
			if (pf_retries < PF_MAX_RETRIES) {
				pf_retries++;
				pi_do_claimed(pf_current->pi, do_pf_write_start);
				return;
			}
			next_request(BLK_STS_IOERR);
			return;
		}
		pi_write_block(pf_current->pi, pf_buf, 512);
		if (pf_next_buf())
			break;
	}
	pf_mask = 0;	/* wait for BSY to clear only */
	ps_set_intr(do_pf_write_done, pf_ready, PF_TMO, nice);
}

/*
 * Final status check after the write data has been transferred; retries
 * the cluster on error, otherwise completes the request.
 */
static void do_pf_write_done(void)
{
	if (pf_wait(pf_current, STAT_BUSY, 0, "write block", "done") & STAT_ERR) {
		pi_disconnect(pf_current->pi);
		if (pf_retries < PF_MAX_RETRIES) {
			pf_retries++;
			pi_do_claimed(pf_current->pi, do_pf_write_start);
			return;
		}
		next_request(BLK_STS_IOERR);
		return;
	}
	pi_disconnect(pf_current->pi);
	next_request(0);
}
/*
 * Module init: allocate per-unit resources, detect drives, register the
 * block major and publish a gendisk for every present unit.  On
 * register_blkdev() failure all per-unit block resources are torn down.
 */
static int __init pf_init(void)
{				/* preliminary initialisation */
	struct pf_unit *pf;
	int unit;

	if (disable)
		return -EINVAL;

	pf_init_units();

	if (pf_detect())
		return -ENODEV;
	pf_busy = 0;

	if (register_blkdev(major, name)) {
		/* undo everything pf_init_units()/pf_detect() built */
		for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
			if (!pf->disk)
				continue;
			blk_cleanup_queue(pf->disk->queue);
			blk_mq_free_tag_set(&pf->tag_set);
			put_disk(pf->disk);
		}
		return -EBUSY;
	}

	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
		struct gendisk *disk = pf->disk;

		if (!pf->present)
			continue;
		disk->private_data = pf;
		add_disk(disk);
	}
	return 0;
}

/*
 * Module exit: unregister the major and release every unit's gendisk,
 * queue, tag set and (for present drives) the paride adapter.
 */
static void __exit pf_exit(void)
{
	struct pf_unit *pf;
	int unit;

	unregister_blkdev(major, name);
	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
		if (!pf->disk)
			continue;

		if (pf->present)
			del_gendisk(pf->disk);

		blk_cleanup_queue(pf->disk->queue);
		blk_mq_free_tag_set(&pf->tag_set);
		put_disk(pf->disk);

		if (pf->present)
			pi_release(pf->pi);
	}
}
MODULE_LICENSE("GPL");
module_init(pf_init)
module_exit(pf_exit)
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_1060_0 |
crossvul-cpp_data_good_3060_13 | /* Basic authentication token and access key management
*
* Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"
struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);
struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);
unsigned int key_quota_root_maxkeys = 200; /* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000; /* root's key space quota */
unsigned int key_quota_maxkeys = 200; /* general key count quota */
unsigned int key_quota_maxbytes = 20000; /* general key space quota */
static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);
/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);
#ifdef KEY_DEBUGGING
/* Debug-build sanity check: report and BUG on a corrupted key magic. */
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif
/*
 * Get the key quota record for a user, allocating a new record if one doesn't
 * already exist.  Returns the record with its usage count incremented, or
 * NULL on allocation failure.
 */
struct key_user *key_user_lookup(kuid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid_lt(uid, user->uid))
			p = &(*p)->rb_left;
		else if (uid_gt(uid, user->uid))
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);	/* free(NULL) is a no-op on the first pass */
out:
	return user;
}
/*
 * Dispose of a user structure: drop one reference and, when it was the
 * last, unlink the record from the quota tree and free it.
 */
void key_user_put(struct key_user *user)
{
	/* takes key_user_lock only when the count actually hits zero */
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}
}
/*
 * Allocate a serial number for a key.  These are assigned randomly to avoid
 * security issues through covert channel problems.  Serials < 3 are
 * reserved, and collisions are resolved by walking forward to the next
 * unused serial.
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			/* serial wrapped: restart from the reserved floor */
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}
}
/**
* key_alloc - Allocate a key of the specified type.
* @type: The type of key to allocate.
* @desc: The key description to allow the key to be searched out.
* @uid: The owner of the new key.
* @gid: The group ID for the new key's group permissions.
* @cred: The credentials specifying UID namespace.
* @perm: The permissions mask of the new key.
* @flags: Flags specifying quota properties.
*
* Allocate a key of the specified type with the attributes given. The key is
* returned in an uninstantiated state and the caller needs to instantiate the
* key before returning.
*
* The user's key count quota is updated to reflect the creation of the key and
* the user's key data quota has the default for the key type reserved. The
* instantiation function should amend this as necessary. If insufficient
* quota is available, -EDQUOT will be returned.
*
* The LSM security modules can prevent a key being created, in which case
* -EACCES will be returned.
*
* Returns a pointer to the new key if successful and an error code otherwise.
*
* Note that the caller needs to ensure the key type isn't uninstantiated.
* Internally this can be done by locking key_types_sem. Externally, this can
* be done by either never unregistering the key type, or making sure
* key_alloc() calls don't race with module unloading.
*/
struct key *key_alloc(struct key_type *type, const char *desc,
		      kuid_t uid, kgid_t gid, const struct cred *cred,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	if (type->vet_description) {
		ret = type->vet_description(desc);
		if (ret < 0) {
			key = ERR_PTR(ret);
			goto error;
		}
	}

	desclen = strlen(desc);
	quotalen = desclen + 1 + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
			key_quota_root_maxkeys : key_quota_maxkeys;
		unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			/* third test catches qnbytes overflow */
			if (user->qnkeys + 1 >= maxkeys ||
			    user->qnbytes + quotalen >= maxbytes ||
			    user->qnbytes + quotalen < user->qnbytes)
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	/* NOTE: desc was already verified non-NULL at entry, so this
	 * condition is always true here */
	if (desc) {
		key->index_key.desc_len = desclen;
		key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	lockdep_set_class(&key->sem, &type->lock_class);
	key->index_key.type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;
	if (flags & KEY_ALLOC_TRUSTED)
		key->flags |= 1 << KEY_FLAG_TRUSTED;

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, cred, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

	/* unwind paths: each label releases what was acquired before it */
security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;
}
EXPORT_SYMBOL(key_alloc);
/**
* key_payload_reserve - Adjust data quota reservation for the key's payload
* @key: The key to make the reservation for.
* @datalen: The amount of data payload the caller now wants.
*
* Adjust the amount of the owning user's key data quota that a key reserves.
* If the amount is increased, then -EDQUOT may be returned if there isn't
* enough free quota available.
*
* If successful, 0 is returned.
*/
int key_payload_reserve(struct key *key, size_t datalen)
{
	/* delta is negative when the payload is shrinking */
	int delta = (int)datalen - key->datalen;
	int ret = 0;
	key_check(key);
	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
			key_quota_root_maxbytes : key_quota_maxbytes;
		spin_lock(&key->user->lock);
		/* only a growing reservation can hit the limit; the second
		 * test catches wraparound of qnbytes + delta */
		if (delta > 0 &&
		    (key->user->qnbytes + delta >= maxbytes ||
		     key->user->qnbytes + delta < key->user->qnbytes)) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}
	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;
	return ret;
}
EXPORT_SYMBOL(key_payload_reserve);
/*
* Instantiate a key and link it into the target keyring atomically. Must be
* called with the target keyring's semaphore writelocked. The target key's
* semaphore need not be locked as instantiation is serialised by
* key_construction_mutex.
*/
static int __key_instantiate_and_link(struct key *key,
				      struct key_preparsed_payload *prep,
				      struct key *keyring,
				      struct key *authkey,
				      struct assoc_array_edit **_edit)
{
	int ret, awaken;
	key_check(key);
	key_check(keyring);
	awaken = 0;
	ret = -EBUSY;
	mutex_lock(&key_construction_mutex);
	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, prep);
		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
			/* anyone who set USER_CONSTRUCT is sleeping on it */
			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;
			/* and link it into the destination keyring */
			if (keyring)
				__key_link(key, _edit);
			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);
			/* honour any expiry time decided on during preparse
			 * (TIME_T_MAX means "no expiry") */
			if (prep->expiry != TIME_T_MAX) {
				key->expiry = prep->expiry;
				key_schedule_gc(prep->expiry + key_gc_delay);
			}
		}
	}
	mutex_unlock(&key_construction_mutex);
	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
	return ret;
}
/**
* key_instantiate_and_link - Instantiate a key and link it into the keyring.
* @key: The key to instantiate.
* @data: The data to use to instantiate the keyring.
* @datalen: The length of @data.
* @keyring: Keyring to create a link in on success (or NULL).
* @authkey: The authorisation token permitting instantiation.
*
* Instantiate a key that's in the uninstantiated state using the provided data
* and, if successful, link it in to the destination keyring if one is
* supplied.
*
* If successful, 0 is returned, the authorisation token is revoked and anyone
* waiting for the key is woken up. If the key was already instantiated,
* -EBUSY will be returned.
*/
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *authkey)
{
	struct key_preparsed_payload prep;
	struct assoc_array_edit *edit;
	int ret;
	memset(&prep, 0, sizeof(prep));
	prep.data = data;
	prep.datalen = datalen;
	prep.quotalen = key->type->def_datalen;
	prep.expiry = TIME_T_MAX;
	if (key->type->preparse) {
		ret = key->type->preparse(&prep);
		if (ret < 0)
			goto error;
	}
	if (keyring) {
		/* lock the keyring and pre-allocate the link edit */
		ret = __key_link_begin(keyring, &key->index_key, &edit);
		if (ret < 0)
			goto error;
	}
	ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
	if (keyring)
		__key_link_end(keyring, &key->index_key, edit);
error:
	/* NOTE(review): free_preparse is also run when ->preparse itself
	 * failed, so key types appear to be expected to cope with a
	 * partially initialised prep -- confirm against the key type API */
	if (key->type->preparse)
		key->type->free_preparse(&prep);
	return ret;
}
EXPORT_SYMBOL(key_instantiate_and_link);
/**
* key_reject_and_link - Negatively instantiate a key and link it into the keyring.
* @key: The key to instantiate.
* @timeout: The timeout on the negative key.
* @error: The error to return when the key is hit.
* @keyring: Keyring to create a link in on success (or NULL).
* @authkey: The authorisation token permitting instantiation.
*
* Negatively instantiate a key that's in the uninstantiated state and, if
* successful, set its timeout and stored error and link it in to the
* destination keyring if one is supplied. The key and any links to the key
* will be automatically garbage collected after the timeout expires.
*
* Negative keys are used to rate limit repeated request_key() calls by causing
* them to return the stored error code (typically ENOKEY) until the negative
* key expires.
*
* If successful, 0 is returned, the authorisation token is revoked and anyone
* waiting for the key is woken up. If the key was already instantiated,
* -EBUSY will be returned.
*/
int key_reject_and_link(struct key *key,
			unsigned timeout,
			unsigned error,
			struct key *keyring,
			struct key *authkey)
{
	struct assoc_array_edit *edit;
	struct timespec now;
	int ret, awaken, link_ret = 0;
	key_check(key);
	key_check(keyring);
	awaken = 0;
	ret = -EBUSY;
	if (keyring)
		link_ret = __key_link_begin(keyring, &key->index_key, &edit);
	mutex_lock(&key_construction_mutex);
	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		key->type_data.reject_error = -error;
		/* order the error store before the NEGATIVE flag becomes
		 * visible to lockless readers */
		smp_wmb();
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		/* the negative key lives only for 'timeout' seconds */
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);
		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;
		ret = 0;
		/* and link it into the destination keyring */
		if (keyring && link_ret == 0)
			__key_link(key, &edit);
		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}
	mutex_unlock(&key_construction_mutex);
	if (keyring)
		__key_link_end(keyring, &key->index_key, edit);
	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
	/* a link failure is only surfaced if the rejection itself worked */
	return ret == 0 ? link_ret : ret;
}
EXPORT_SYMBOL(key_reject_and_link);
/**
* key_put - Discard a reference to a key.
* @key: The key to discard a reference from.
*
* Discard a reference to a key, and when all the references are gone, we
* schedule the cleanup task to come and pull it out of the tree in process
* context at some later time.
*/
void key_put(struct key *key)
{
	/* NULL is tolerated so callers need not test first */
	if (!key)
		return;
	key_check(key);
	/* last reference gone: hand the key to the garbage collector */
	if (atomic_dec_and_test(&key->usage))
		schedule_work(&key_gc_work);
}
EXPORT_SYMBOL(key_put);
/*
* Find a key by its serial number.
*/
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;
	spin_lock(&key_serial_lock);
	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);
		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}
not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;
found:
	/* pretend it doesn't exist if it is awaiting deletion */
	if (atomic_read(&key->usage) == 0)
		goto not_found;
	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	__key_get(key);
error:
	/* on success the lock is dropped after taking a usage reference */
	spin_unlock(&key_serial_lock);
	return key;
}
/*
* Find and lock the specified key type against removal.
*
* We return with the sem read-locked if successful. If the type wasn't
* available -ENOKEY is returned instead.
*/
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *candidate;

	down_read(&key_types_sem);

	/* scan the registered kernel key types for a name match; on success
	 * the read lock is retained for the caller (see key_type_put()) */
	list_for_each_entry(candidate, &key_types_list, link) {
		if (strcmp(candidate->name, type) == 0)
			return candidate;
	}

	/* no such type: release the lock and report the failure */
	up_read(&key_types_sem);
	return ERR_PTR(-ENOKEY);
}
void key_set_timeout(struct key *key, unsigned timeout)
{
struct timespec now;
time_t expiry = 0;
/* make the changes with the locks held to prevent races */
down_write(&key->sem);
if (timeout > 0) {
now = current_kernel_time();
expiry = now.tv_sec + timeout;
}
key->expiry = expiry;
key_schedule_gc(key->expiry + key_gc_delay);
up_write(&key->sem);
}
EXPORT_SYMBOL_GPL(key_set_timeout);
/*
* Unlock a key type locked by key_type_lookup().
*/
void key_type_put(struct key_type *ktype)
{
	/* the read lock taken by key_type_lookup() is all that pins the
	 * type; ktype itself is not otherwise used here */
	up_read(&key_types_sem);
}
/*
* Attempt to update an existing key.
*
* The key is given to us with an incremented refcount that we need to discard
* if we get an error.
*/
static inline key_ref_t __key_update(key_ref_t key_ref,
				     struct key_preparsed_payload *prep)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;
	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_NEED_WRITE);
	if (ret < 0)
		goto error;
	/* a key whose type has no update op cannot be updated in place */
	ret = -EEXIST;
	if (!key->type->update)
		goto error;
	down_write(&key->sem);
	ret = key->type->update(key, prep);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
	up_write(&key->sem);
	if (ret < 0)
		goto error;
out:
	return key_ref;
error:
	/* on failure the pinned reference we were handed is dropped */
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;
}
/**
* key_create_or_update - Update or create and instantiate a key.
* @keyring_ref: A pointer to the destination keyring with possession flag.
* @type: The type of key.
* @description: The searchable description for the key.
* @payload: The data to use to instantiate or update the key.
* @plen: The length of @payload.
* @perm: The permissions mask for a new key.
* @flags: The quota flags for a new key.
*
* Search the destination keyring for a key of the same description and if one
* is found, update it, otherwise create and instantiate a new one and create a
* link to it from that keyring.
*
* If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
* concocted.
*
* Returns a pointer to the new key if successful, -ENODEV if the key type
* wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
* caller isn't permitted to modify the keyring or the LSM did not permit
* creation of the key.
*
* On success, the possession flag from the keyring ref will be tacked on to
* the key ref before it is returned.
*/
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	struct keyring_index_key index_key = {
		.description = description,
	};
	struct key_preparsed_payload prep;
	struct assoc_array_edit *edit;
	const struct cred *cred = current_cred();
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;
	/* look up the key type to see if it's one of the registered kernel
	 * types */
	index_key.type = key_type_lookup(type);
	if (IS_ERR(index_key.type)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}
	/* without a preparser, the caller must supply the description */
	key_ref = ERR_PTR(-EINVAL);
	if (!index_key.type->instantiate ||
	    (!index_key.description && !index_key.type->preparse))
		goto error_put_type;
	keyring = key_ref_to_ptr(keyring_ref);
	key_check(keyring);
	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_put_type;
	memset(&prep, 0, sizeof(prep));
	prep.data = payload;
	prep.datalen = plen;
	prep.quotalen = index_key.type->def_datalen;
	prep.trusted = flags & KEY_ALLOC_TRUSTED;
	prep.expiry = TIME_T_MAX;
	if (index_key.type->preparse) {
		ret = index_key.type->preparse(&prep);
		if (ret < 0) {
			key_ref = ERR_PTR(ret);
			goto error_free_prep;
		}
		/* the preparser may derive the description from the payload */
		if (!index_key.description)
			index_key.description = prep.description;
		key_ref = ERR_PTR(-EINVAL);
		if (!index_key.description)
			goto error_free_prep;
	}
	index_key.desc_len = strlen(index_key.description);
	/* a TRUSTED_ONLY keyring only accepts keys marked trusted */
	key_ref = ERR_PTR(-EPERM);
	if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags))
		goto error_free_prep;
	flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0;
	ret = __key_link_begin(keyring, &index_key, &edit);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_free_prep;
	}
	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_NEED_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_link_end;
	}
	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (index_key.type->update) {
		key_ref = find_key_to_update(keyring_ref, &index_key);
		if (key_ref)
			goto found_matching_key;
	}
	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW;
		if (index_key.type->read)
			perm |= KEY_POS_READ;
		if (index_key.type == &key_type_keyring ||
		    index_key.type->update)
			perm |= KEY_POS_WRITE;
	}
	/* allocate a new key */
	key = key_alloc(index_key.type, index_key.description,
			cred->fsuid, cred->fsgid, cred, perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_link_end;
	}
	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_link_end;
	}
	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
error_link_end:
	__key_link_end(keyring, &index_key, edit);
error_free_prep:
	if (index_key.type->preparse)
		index_key.type->free_preparse(&prep);
error_put_type:
	key_type_put(index_key.type);
error:
	return key_ref;
found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	__key_link_end(keyring, &index_key, edit);
	key_ref = __key_update(key_ref, &prep);
	goto error_free_prep;
}
EXPORT_SYMBOL(key_create_or_update);
/**
* key_update - Update a key's contents.
* @key_ref: The pointer (plus possession flag) to the key.
* @payload: The data to be used to update the key.
* @plen: The length of @payload.
*
* Attempt to update the contents of a key with the given payload data. The
* caller must be granted Write permission on the key. Negative keys can be
* instantiated by this method.
*
* Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
* type does not support updating. The key type may return other errors.
*/
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key_preparsed_payload prep;
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_NEED_WRITE);
	if (ret < 0)
		return ret;

	/* attempt to update it if supported */
	if (!key->type->update)
		return -EOPNOTSUPP;

	/* BUGFIX: the two failures above must return directly instead of
	 * jumping to the cleanup label -- previously they reached
	 * ->free_preparse(&prep) with 'prep' still *uninitialized*, handing
	 * the key type garbage stack contents to free.  'prep' only becomes
	 * valid from this point on.
	 */
	memset(&prep, 0, sizeof(prep));
	prep.data = payload;
	prep.datalen = plen;
	prep.quotalen = key->type->def_datalen;
	prep.expiry = TIME_T_MAX;
	if (key->type->preparse) {
		ret = key->type->preparse(&prep);
		if (ret < 0)
			goto error;
	}

	down_write(&key->sem);
	ret = key->type->update(key, &prep);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
	up_write(&key->sem);

error:
	/* free_preparse also runs when ->preparse failed; prep was zeroed
	 * first so the type only sees fields it set itself */
	if (key->type->preparse)
		key->type->free_preparse(&prep);
	return ret;
}
EXPORT_SYMBOL(key_update);
/**
* key_revoke - Revoke a key.
* @key: The key to be revoked.
*
* Mark a key as being revoked and ask the type to free up its resources. The
* revocation timeout is set and the key and all its links will be
* automatically garbage collected after key_gc_delay amount of time if they
* are not manually dealt with first.
*/
void key_revoke(struct key *key)
{
	struct timespec now;
	time_t time;
	key_check(key);
	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	/* only the first revoker gets to call the type's revoke op */
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);
	/* set the death time to no more than the expiry time */
	now = current_kernel_time();
	time = now.tv_sec;
	if (key->revoked_at == 0 || key->revoked_at > time) {
		key->revoked_at = time;
		key_schedule_gc(key->revoked_at + key_gc_delay);
	}
	up_write(&key->sem);
}
EXPORT_SYMBOL(key_revoke);
/**
* key_invalidate - Invalidate a key.
* @key: The key to be invalidated.
*
* Mark a key as being invalidated and have it cleaned up immediately. The key
* is ignored by all searches and other operations from this point.
*/
void key_invalidate(struct key *key)
{
	kenter("%d", key_serial(key));
	key_check(key);

	/* cheap unlocked test first: nothing to do if already invalidated */
	if (test_bit(KEY_FLAG_INVALIDATED, &key->flags))
		return;

	down_write_nested(&key->sem, 1);
	/* the locked test-and-set decides which caller kicks off the GC */
	if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags))
		key_schedule_gc_links();
	up_write(&key->sem);
}
EXPORT_SYMBOL(key_invalidate);
/**
* generic_key_instantiate - Simple instantiation of a key from preparsed data
* @key: The key to be instantiated
* @prep: The preparsed data to load.
*
* Instantiate a key from preparsed data. We assume we can just copy the data
* in directly and clear the old pointers.
*
* This can be pointed to directly by the key type instantiate op pointer.
*/
int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
	int ret;
	pr_devel("==>%s()\n", __func__);
	/* charge the quota for the preparsed payload size first */
	ret = key_payload_reserve(key, prep->quotalen);
	if (ret == 0) {
		/* take ownership of the prepared pointers, clearing them in
		 * prep so a later ->free_preparse() won't release what the
		 * key now owns */
		key->type_data.p[0] = prep->type_data[0];
		key->type_data.p[1] = prep->type_data[1];
		rcu_assign_keypointer(key, prep->payload[0]);
		key->payload.data2[1] = prep->payload[1];
		prep->type_data[0] = NULL;
		prep->type_data[1] = NULL;
		prep->payload[0] = NULL;
		prep->payload[1] = NULL;
	}
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL(generic_key_instantiate);
/**
* register_key_type - Register a type of key.
* @ktype: The new key type.
*
* Register a new key type.
*
* Returns 0 on success or -EEXIST if a type of this name already exists.
*/
int register_key_type(struct key_type *ktype)
{
	struct key_type *existing;
	int ret = -EEXIST;

	memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));

	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(existing, &key_types_list, link) {
		if (strcmp(existing->name, ktype->name) == 0)
			goto out_unlock;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	pr_notice("Key type %s registered\n", ktype->name);
	ret = 0;

out_unlock:
	up_write(&key_types_sem);
	return ret;
}
EXPORT_SYMBOL(register_key_type);
/**
* unregister_key_type - Unregister a type of key.
* @ktype: The key type.
*
* Unregister a key type and mark all the extant keys of this type as dead.
* Those keys of this type are then destroyed to get rid of their payloads and
* they and their links will be garbage collected as soon as possible.
*/
void unregister_key_type(struct key_type *ktype)
{
	down_write(&key_types_sem);
	list_del_init(&ktype->link);
	/* NOTE(review): the write lock is downgraded to a read lock across
	 * the GC pass -- presumably to keep the type pinned while allowing
	 * other readers; confirm against key_gc_keytype()'s requirements */
	downgrade_write(&key_types_sem);
	key_gc_keytype(ktype);
	pr_notice("Key type %s unregistered\n", ktype->name);
	up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);
/*
* Initialise the key management state.
*/
void __init key_init(void)
{
	/* allocate a slab in which we can store keys (SLAB_PANIC: no error
	 * check needed -- failure aborts boot) */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);
	list_add_tail(&key_type_logon.link, &key_types_list);
	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);
	rb_insert_color(&root_key_user.node,
			&key_user_tree);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_3060_13 |
crossvul-cpp_data_bad_2199_0 | /*
* Copyright (C) 2006,2008 by the Massachusetts Institute of Technology.
* All rights reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* A module that implements the spnego security mechanism.
* It is used to negotiate the security mechanism between
* peers using the GSS-API. SPNEGO is specified in RFC 4178.
*
*/
/*
* Copyright (c) 2006-2008, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* #pragma ident "@(#)spnego_mech.c 1.7 04/09/28 SMI" */
#include <k5-int.h>
#include <krb5.h>
#include <mglueP.h>
#include "gssapiP_spnego.h"
#include <gssapi_err_generic.h>
#undef g_token_size
#undef g_verify_token_header
#undef g_make_token_header
#define HARD_ERROR(v) ((v) != GSS_S_COMPLETE && (v) != GSS_S_CONTINUE_NEEDED)
typedef const gss_OID_desc *gss_OID_const;
/* der routines defined in libgss */
extern unsigned int gssint_der_length_size(unsigned int);
extern int gssint_get_der_length(unsigned char **, unsigned int,
unsigned int*);
extern int gssint_put_der_length(unsigned int, unsigned char **, unsigned int);
/* private routines for spnego_mechanism */
static spnego_token_t make_spnego_token(const char *);
static gss_buffer_desc make_err_msg(const char *);
static int g_token_size(gss_OID_const, unsigned int);
static int g_make_token_header(gss_OID_const, unsigned int,
unsigned char **, unsigned int);
static int g_verify_token_header(gss_OID_const, unsigned int *,
unsigned char **,
int, unsigned int);
static int g_verify_neg_token_init(unsigned char **, unsigned int);
static gss_OID get_mech_oid(OM_uint32 *, unsigned char **, size_t);
static gss_buffer_t get_input_token(unsigned char **, unsigned int);
static gss_OID_set get_mech_set(OM_uint32 *, unsigned char **, unsigned int);
static OM_uint32 get_req_flags(unsigned char **, OM_uint32, OM_uint32 *);
static OM_uint32 get_available_mechs(OM_uint32 *, gss_name_t, gss_cred_usage_t,
gss_const_key_value_set_t,
gss_cred_id_t *, gss_OID_set *);
static OM_uint32 get_negotiable_mechs(OM_uint32 *, spnego_gss_cred_id_t,
gss_cred_usage_t, gss_OID_set *);
static void release_spnego_ctx(spnego_gss_ctx_id_t *);
static void check_spnego_options(spnego_gss_ctx_id_t);
static spnego_gss_ctx_id_t create_spnego_ctx(void);
static int put_mech_set(gss_OID_set mechSet, gss_buffer_t buf);
static int put_input_token(unsigned char **, gss_buffer_t, unsigned int);
static int put_mech_oid(unsigned char **, gss_OID_const, unsigned int);
static int put_negResult(unsigned char **, OM_uint32, unsigned int);
static OM_uint32
process_mic(OM_uint32 *, gss_buffer_t, spnego_gss_ctx_id_t,
gss_buffer_t *, OM_uint32 *, send_token_flag *);
static OM_uint32
handle_mic(OM_uint32 *, gss_buffer_t, int, spnego_gss_ctx_id_t,
gss_buffer_t *, OM_uint32 *, send_token_flag *);
static OM_uint32
init_ctx_new(OM_uint32 *, spnego_gss_cred_id_t, gss_ctx_id_t *,
send_token_flag *);
static OM_uint32
init_ctx_nego(OM_uint32 *, spnego_gss_ctx_id_t, OM_uint32, gss_OID,
gss_buffer_t *, gss_buffer_t *,
OM_uint32 *, send_token_flag *);
static OM_uint32
init_ctx_cont(OM_uint32 *, gss_ctx_id_t *, gss_buffer_t,
gss_buffer_t *, gss_buffer_t *,
OM_uint32 *, send_token_flag *);
static OM_uint32
init_ctx_reselect(OM_uint32 *, spnego_gss_ctx_id_t, OM_uint32,
gss_OID, gss_buffer_t *, gss_buffer_t *,
OM_uint32 *, send_token_flag *);
static OM_uint32
init_ctx_call_init(OM_uint32 *, spnego_gss_ctx_id_t, spnego_gss_cred_id_t,
gss_name_t, OM_uint32, OM_uint32, gss_buffer_t,
gss_OID *, gss_buffer_t, OM_uint32 *, OM_uint32 *,
OM_uint32 *, send_token_flag *);
static OM_uint32
acc_ctx_new(OM_uint32 *, gss_buffer_t, gss_ctx_id_t *,
spnego_gss_cred_id_t, gss_buffer_t *,
gss_buffer_t *, OM_uint32 *, send_token_flag *);
static OM_uint32
acc_ctx_cont(OM_uint32 *, gss_buffer_t, gss_ctx_id_t *,
gss_buffer_t *, gss_buffer_t *,
OM_uint32 *, send_token_flag *);
static OM_uint32
acc_ctx_vfy_oid(OM_uint32 *, spnego_gss_ctx_id_t, gss_OID,
OM_uint32 *, send_token_flag *);
static OM_uint32
acc_ctx_call_acc(OM_uint32 *, spnego_gss_ctx_id_t, spnego_gss_cred_id_t,
gss_buffer_t, gss_OID *, gss_buffer_t,
OM_uint32 *, OM_uint32 *, gss_cred_id_t *,
OM_uint32 *, send_token_flag *);
static gss_OID
negotiate_mech(gss_OID_set, gss_OID_set, OM_uint32 *);
static int
g_get_tag_and_length(unsigned char **, int, unsigned int, unsigned int *);
static int
make_spnego_tokenInit_msg(spnego_gss_ctx_id_t,
int,
gss_buffer_t,
OM_uint32, gss_buffer_t, send_token_flag,
gss_buffer_t);
static int
make_spnego_tokenTarg_msg(OM_uint32, gss_OID, gss_buffer_t,
gss_buffer_t, send_token_flag,
gss_buffer_t);
static OM_uint32
get_negTokenInit(OM_uint32 *, gss_buffer_t, gss_buffer_t,
gss_OID_set *, OM_uint32 *, gss_buffer_t *,
gss_buffer_t *);
static OM_uint32
get_negTokenResp(OM_uint32 *, unsigned char *, unsigned int,
OM_uint32 *, gss_OID *, gss_buffer_t *, gss_buffer_t *);
static int
is_kerb_mech(gss_OID oid);
/* SPNEGO oid structure */
static const gss_OID_desc spnego_oids[] = {
	{SPNEGO_OID_LENGTH, SPNEGO_OID},
};
/* the mechanism OID exported to callers: 1.3.6.1.5.5.2 (see comment below) */
const gss_OID_desc * const gss_mech_spnego = spnego_oids+0;
static const gss_OID_set_desc spnego_oidsets[] = {
	{1, (gss_OID) spnego_oids+0},
};
/* a one-element OID set holding just the SPNEGO mechanism OID */
const gss_OID_set_desc * const gss_mech_set_spnego = spnego_oidsets+0;
static int make_NegHints(OM_uint32 *, spnego_gss_cred_id_t, gss_buffer_t *);
static int put_neg_hints(unsigned char **, gss_buffer_t, unsigned int);
static OM_uint32
acc_ctx_hints(OM_uint32 *, gss_ctx_id_t *, spnego_gss_cred_id_t,
gss_buffer_t *, OM_uint32 *, send_token_flag *);
/*
* The Mech OID for SPNEGO:
* { iso(1) org(3) dod(6) internet(1) security(5)
* mechanism(5) spnego(2) }
*/
/* Positional initializer for the mechglue dispatch table: each slot is the
 * SPNEGO implementation of one GSS operation; NULL marks operations this
 * mechanism does not provide. */
static struct gss_config spnego_mechanism =
{
	{SPNEGO_OID_LENGTH, SPNEGO_OID},
	NULL,
	spnego_gss_acquire_cred,
	spnego_gss_release_cred,
	spnego_gss_init_sec_context,
#ifndef LEAN_CLIENT
	spnego_gss_accept_sec_context,
#else
	NULL,
#endif  /* LEAN_CLIENT */
	NULL,				/* gss_process_context_token */
	spnego_gss_delete_sec_context,	/* gss_delete_sec_context */
	spnego_gss_context_time,	/* gss_context_time */
	spnego_gss_get_mic,		/* gss_get_mic */
	spnego_gss_verify_mic,		/* gss_verify_mic */
	spnego_gss_wrap,		/* gss_wrap */
	spnego_gss_unwrap,		/* gss_unwrap */
	spnego_gss_display_status,
	NULL,				/* gss_indicate_mechs */
	spnego_gss_compare_name,
	spnego_gss_display_name,
	spnego_gss_import_name,
	spnego_gss_release_name,
	spnego_gss_inquire_cred,	/* gss_inquire_cred */
	NULL,				/* gss_add_cred */
#ifndef LEAN_CLIENT
	spnego_gss_export_sec_context,		/* gss_export_sec_context */
	spnego_gss_import_sec_context,		/* gss_import_sec_context */
#else
	NULL,				/* gss_export_sec_context */
	NULL,				/* gss_import_sec_context */
#endif  /* LEAN_CLIENT */
	NULL,				/* gss_inquire_cred_by_mech */
	spnego_gss_inquire_names_for_mech,
	spnego_gss_inquire_context,	/* gss_inquire_context */
	NULL,				/* gss_internal_release_oid */
	spnego_gss_wrap_size_limit,	/* gss_wrap_size_limit */
	NULL,				/* gssd_pname_to_uid */
	NULL,				/* gss_userok */
	NULL,				/* gss_export_name */
	spnego_gss_duplicate_name,	/* gss_duplicate_name */
	NULL,				/* gss_store_cred */
	spnego_gss_inquire_sec_context_by_oid, /* gss_inquire_sec_context_by_oid */
	spnego_gss_inquire_cred_by_oid,	/* gss_inquire_cred_by_oid */
	spnego_gss_set_sec_context_option, /* gss_set_sec_context_option */
	spnego_gss_set_cred_option,	/* gssspi_set_cred_option */
	NULL,				/* gssspi_mech_invoke */
	spnego_gss_wrap_aead,
	spnego_gss_unwrap_aead,
	spnego_gss_wrap_iov,
	spnego_gss_unwrap_iov,
	spnego_gss_wrap_iov_length,
	spnego_gss_complete_auth_token,
	spnego_gss_acquire_cred_impersonate_name,
	NULL,				/* gss_add_cred_impersonate_name */
	spnego_gss_display_name_ext,
	spnego_gss_inquire_name,
	spnego_gss_get_name_attribute,
	spnego_gss_set_name_attribute,
	spnego_gss_delete_name_attribute,
	spnego_gss_export_name_composite,
	spnego_gss_map_name_to_any,
	spnego_gss_release_any_name_mapping,
	spnego_gss_pseudo_random,
	spnego_gss_set_neg_mechs,
	spnego_gss_inquire_saslname_for_mech,
	spnego_gss_inquire_mech_for_saslname,
	spnego_gss_inquire_attrs_for_mech,
	spnego_gss_acquire_cred_from,
	NULL,				/* gss_store_cred_into */
	spnego_gss_acquire_cred_with_password,
	spnego_gss_export_cred,
	spnego_gss_import_cred,
	NULL,				/* gssspi_import_sec_context_by_mech */
	NULL,				/* gssspi_import_name_by_mech */
	NULL,				/* gssspi_import_cred_by_mech */
	spnego_gss_get_mic_iov,
	spnego_gss_verify_mic_iov,
	spnego_gss_get_mic_iov_length
};
#ifdef _GSS_STATIC_LINK
#include "mglueP.h"
static int gss_spnegomechglue_init(void)
{
struct gss_mech_config mech_spnego;
memset(&mech_spnego, 0, sizeof(mech_spnego));
mech_spnego.mech = &spnego_mechanism;
mech_spnego.mechNameStr = "spnego";
mech_spnego.mech_type = GSS_C_NO_OID;
return gssint_register_mechinfo(&mech_spnego);
}
#else
/* Dynamic-link entry point: hand the mechglue our mechanism vtable. */
gss_mechanism KRB5_CALLCONV
gss_mech_initialize(void)
{
	return &spnego_mechanism;
}
MAKE_INIT_FUNCTION(gss_krb5int_lib_init);
MAKE_FINI_FUNCTION(gss_krb5int_lib_fini);
int gss_krb5int_lib_init(void);
#endif /* _GSS_STATIC_LINK */
/*
 * One-time library initialization: register the thread-local status key
 * and, when statically linked, register the mech with the mechglue.
 * Returns 0 on success or an errno-style error code.
 */
int gss_spnegoint_lib_init(void)
{
	int ret = k5_key_register(K5_KEY_GSS_SPNEGO_STATUS, NULL);

	if (ret != 0)
		return ret;
#ifdef _GSS_STATIC_LINK
	return gss_spnegomechglue_init();
#else
	return 0;
#endif
}
/* Library finalizer; SPNEGO holds no global state that needs teardown. */
void gss_spnegoint_lib_fini(void)
{
}
/*ARGSUSED*/
/*
 * Acquire a SPNEGO credential.  Thin forwarder to
 * spnego_gss_acquire_cred_from() with no credential store.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_acquire_cred(OM_uint32 *minor_status,
			gss_name_t desired_name,
			OM_uint32 time_req,
			gss_OID_set desired_mechs,
			gss_cred_usage_t cred_usage,
			gss_cred_id_t *output_cred_handle,
			gss_OID_set *actual_mechs,
			OM_uint32 *time_rec)
{
	return spnego_gss_acquire_cred_from(minor_status, desired_name,
					    time_req, desired_mechs,
					    cred_usage, NULL,
					    output_cred_handle,
					    actual_mechs, time_rec);
}
/*ARGSUSED*/
/*
 * Acquire a SPNEGO credential from an optional credential store.
 *
 * Obtains a mechglue credential covering the available mechanisms and
 * wraps it in a spnego_gss_cred_id_rec, whose neg_mechs member can later
 * restrict the set of negotiable mechs.  On success *output_cred_handle
 * owns the wrapper; on failure it is set to GSS_C_NO_CREDENTIAL.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_acquire_cred_from(OM_uint32 *minor_status,
			     const gss_name_t desired_name,
			     OM_uint32 time_req,
			     const gss_OID_set desired_mechs,
			     gss_cred_usage_t cred_usage,
			     gss_const_key_value_set_t cred_store,
			     gss_cred_id_t *output_cred_handle,
			     gss_OID_set *actual_mechs,
			     OM_uint32 *time_rec)
{
	OM_uint32 status, tmpmin;
	/* Initialize so the checks and release below are safe even if
	 * get_available_mechs() fails before setting amechs. */
	gss_OID_set amechs = GSS_C_NULL_OID_SET;
	gss_cred_id_t mcred = NULL;
	spnego_gss_cred_id_t spcred = NULL;
	dsyslog("Entering spnego_gss_acquire_cred\n");
	if (actual_mechs)
		*actual_mechs = NULL;
	if (time_rec)
		*time_rec = 0;
	/* We will obtain a mechglue credential and wrap it in a
	 * spnego_gss_cred_id_rec structure.  Allocate the wrapper. */
	spcred = malloc(sizeof(spnego_gss_cred_id_rec));
	if (spcred == NULL) {
		*minor_status = ENOMEM;
		return (GSS_S_FAILURE);
	}
	spcred->neg_mechs = GSS_C_NULL_OID_SET;
	/*
	 * Always use get_available_mechs to collect a list of
	 * mechs for which creds are available.
	 */
	status = get_available_mechs(minor_status, desired_name,
				     cred_usage, cred_store, &mcred,
				     &amechs);
	if (actual_mechs && amechs != GSS_C_NULL_OID_SET) {
		(void) gssint_copy_oid_set(&tmpmin, amechs, actual_mechs);
	}
	(void) gss_release_oid_set(&tmpmin, &amechs);
	if (status == GSS_S_COMPLETE) {
		/* Transfer ownership of the mechglue cred to the wrapper. */
		spcred->mcred = mcred;
		*output_cred_handle = (gss_cred_id_t)spcred;
	} else {
		free(spcred);
		*output_cred_handle = GSS_C_NO_CREDENTIAL;
	}
	dsyslog("Leaving spnego_gss_acquire_cred\n");
	return (status);
}
/*ARGSUSED*/
/*
 * Release a SPNEGO credential: free the restricted-mech OID set, the
 * wrapped mechglue credential, and the wrapper itself.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_release_cred(OM_uint32 *minor_status,
			gss_cred_id_t *cred_handle)
{
	spnego_gss_cred_id_t spcred;

	dsyslog("Entering spnego_gss_release_cred\n");
	if (minor_status == NULL || cred_handle == NULL)
		return (GSS_S_CALL_INACCESSIBLE_WRITE);

	*minor_status = 0;
	if (*cred_handle == GSS_C_NO_CREDENTIAL)
		return (GSS_S_COMPLETE);

	/* Detach the wrapper from the caller's handle before teardown. */
	spcred = (spnego_gss_cred_id_t)*cred_handle;
	*cred_handle = GSS_C_NO_CREDENTIAL;
	gss_release_oid_set(minor_status, &spcred->neg_mechs);
	gss_release_cred(minor_status, &spcred->mcred);
	free(spcred);
	dsyslog("Leaving spnego_gss_release_cred\n");
	return (GSS_S_COMPLETE);
}
/* Cache the mechglue's per-mech option string for SPNEGO on the context. */
static void
check_spnego_options(spnego_gss_ctx_id_t spnego_ctx)
{
	const gss_OID mech_oid = (const gss_OID)&spnego_oids[0];

	spnego_ctx->optionStr = gssint_get_modOptions(mech_oid);
}
static spnego_gss_ctx_id_t
create_spnego_ctx(void)
{
spnego_gss_ctx_id_t spnego_ctx = NULL;
spnego_ctx = (spnego_gss_ctx_id_t)
malloc(sizeof (spnego_gss_ctx_id_rec));
if (spnego_ctx == NULL) {
return (NULL);
}
spnego_ctx->magic_num = SPNEGO_MAGIC_ID;
spnego_ctx->ctx_handle = GSS_C_NO_CONTEXT;
spnego_ctx->mech_set = NULL;
spnego_ctx->internal_mech = NULL;
spnego_ctx->optionStr = NULL;
spnego_ctx->DER_mechTypes.length = 0;
spnego_ctx->DER_mechTypes.value = NULL;
spnego_ctx->default_cred = GSS_C_NO_CREDENTIAL;
spnego_ctx->mic_reqd = 0;
spnego_ctx->mic_sent = 0;
spnego_ctx->mic_rcvd = 0;
spnego_ctx->mech_complete = 0;
spnego_ctx->nego_done = 0;
spnego_ctx->internal_name = GSS_C_NO_NAME;
spnego_ctx->actual_mech = GSS_C_NO_OID;
check_spnego_options(spnego_ctx);
return (spnego_ctx);
}
/* iso(1) org(3) dod(6) internet(1) private(4) enterprises(1) samba(7165)
 * gssntlmssp(655) controls(1) spnego_req_mechlistMIC(2) */
/* Queried via gss_inquire_sec_context_by_oid() in
 * mech_requires_mechlistMIC() to ask the selected mech whether a
 * mechlistMIC exchange should be forced. */
static const gss_OID_desc spnego_req_mechlistMIC_oid =
{ 11, "\x2B\x06\x01\x04\x01\xB7\x7D\x85\x0F\x01\x02" };
/*
 * Return nonzero if the mechanism has reason to believe that a mechlistMIC
 * exchange will be required.  Microsoft servers erroneously require SPNEGO
 * mechlistMIC if they see an internal MIC within an NTLMSSP Authenticate
 * message, even if NTLMSSP was the preferred mechanism.
 */
static int
mech_requires_mechlistMIC(spnego_gss_ctx_id_t sc)
{
	OM_uint32 major, minor;
	gss_buffer_set_t bufs = NULL;
	int required = 0;

	major = gss_inquire_sec_context_by_oid(&minor, sc->ctx_handle,
					       (gss_OID)&spnego_req_mechlistMIC_oid,
					       &bufs);
	if (major != GSS_S_COMPLETE)
		return 0;

	/* The mech answers "yes" with exactly one buffer containing a
	 * single byte of value 1. */
	if (bufs != NULL && bufs->count == 1 &&
	    bufs->elements[0].length == 1)
		required = (memcmp(bufs->elements[0].value, "\1", 1) == 0);
	(void) gss_release_buffer_set(&minor, &bufs);
	return required;
}
/*
 * Both initiator and acceptor call here to verify and/or create mechListMIC,
 * and to consistency-check the MIC state. handle_mic is invoked only if the
 * negotiated mech has completed and supports MICs.
 *
 * mic_in is the peer's MIC, or GSS_C_NO_BUFFER if none was received.
 * send_mechtok is nonzero if a mechanism token will accompany our reply.
 * *mic_out receives a newly allocated MIC to send, or GSS_C_NO_BUFFER.
 * negState and tokflag are updated to drive the caller's token output.
 */
static OM_uint32
handle_mic(OM_uint32 *minor_status, gss_buffer_t mic_in,
	   int send_mechtok, spnego_gss_ctx_id_t sc,
	   gss_buffer_t *mic_out,
	   OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret;
	ret = GSS_S_FAILURE;
	*mic_out = GSS_C_NO_BUFFER;
	if (mic_in != GSS_C_NO_BUFFER) {
		if (sc->mic_rcvd) {
			/* Reject MIC if we've already received a MIC. */
			*negState = REJECT;
			*tokflag = ERROR_TOKEN_SEND;
			return GSS_S_DEFECTIVE_TOKEN;
		}
	} else if (sc->mic_reqd && !send_mechtok) {
		/*
		 * If the peer sends the final mechanism token, it
		 * must send the MIC with that token if the
		 * negotiation requires MICs.
		 */
		*negState = REJECT;
		*tokflag = ERROR_TOKEN_SEND;
		return GSS_S_DEFECTIVE_TOKEN;
	}
	/* Verify the incoming MIC (if any) and generate ours if needed;
	 * process_mic updates sc->mic_reqd/mic_sent/mic_rcvd. */
	ret = process_mic(minor_status, mic_in, sc, mic_out,
			  negState, tokflag);
	if (ret != GSS_S_COMPLETE) {
		return ret;
	}
	if (sc->mic_reqd) {
		assert(sc->mic_sent || sc->mic_rcvd);
	}
	if (sc->mic_sent && sc->mic_rcvd) {
		/* Both directions are protected; the exchange is done. */
		ret = GSS_S_COMPLETE;
		*negState = ACCEPT_COMPLETE;
		if (*mic_out == GSS_C_NO_BUFFER) {
			/*
			 * We sent a MIC on the previous pass; we
			 * shouldn't be sending a mechanism token.
			 */
			assert(!send_mechtok);
			*tokflag = NO_TOKEN_SEND;
		} else {
			*tokflag = CONT_TOKEN_SEND;
		}
	} else if (sc->mic_reqd) {
		/* Still waiting on the peer's MIC. */
		*negState = ACCEPT_INCOMPLETE;
		ret = GSS_S_CONTINUE_NEEDED;
	} else if (*negState == ACCEPT_COMPLETE) {
		ret = GSS_S_COMPLETE;
	} else {
		ret = GSS_S_CONTINUE_NEEDED;
	}
	return ret;
}
/*
 * Perform the actual verification and/or generation of mechListMIC.
 *
 * The MIC is computed over sc->DER_mechTypes (the DER encoding of the
 * mechanism list).  Receiving any MIC makes a MIC mandatory in both
 * directions (sc->mic_reqd).  On success *mic_out may receive a
 * heap-allocated buffer descriptor owning the generated MIC; the caller
 * is responsible for releasing it.
 */
static OM_uint32
process_mic(OM_uint32 *minor_status, gss_buffer_t mic_in,
	    spnego_gss_ctx_id_t sc, gss_buffer_t *mic_out,
	    OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret, tmpmin;
	gss_qop_t qop_state;
	gss_buffer_desc tmpmic = GSS_C_EMPTY_BUFFER;
	ret = GSS_S_FAILURE;
	if (mic_in != GSS_C_NO_BUFFER) {
		ret = gss_verify_mic(minor_status, sc->ctx_handle,
				     &sc->DER_mechTypes,
				     mic_in, &qop_state);
		if (ret != GSS_S_COMPLETE) {
			*negState = REJECT;
			*tokflag = ERROR_TOKEN_SEND;
			return ret;
		}
		/* If we got a MIC, we must send a MIC. */
		sc->mic_reqd = 1;
		sc->mic_rcvd = 1;
	}
	if (sc->mic_reqd && !sc->mic_sent) {
		ret = gss_get_mic(minor_status, sc->ctx_handle,
				  GSS_C_QOP_DEFAULT,
				  &sc->DER_mechTypes,
				  &tmpmic);
		if (ret != GSS_S_COMPLETE) {
			gss_release_buffer(&tmpmin, &tmpmic);
			*tokflag = NO_TOKEN_SEND;
			return ret;
		}
		/* Wrap the MIC in a heap descriptor owned by the caller. */
		*mic_out = malloc(sizeof(gss_buffer_desc));
		if (*mic_out == GSS_C_NO_BUFFER) {
			gss_release_buffer(&tmpmin, &tmpmic);
			*tokflag = NO_TOKEN_SEND;
			return GSS_S_FAILURE;
		}
		**mic_out = tmpmic;
		sc->mic_sent = 1;
	}
	return GSS_S_COMPLETE;
}
/*
 * Initial call to spnego_gss_init_sec_context().
 *
 * Creates the SPNEGO context, computes the negotiable mech list from
 * spcred, picks the first mech as the optimistic choice, and DER-encodes
 * the list (needed later for mechListMIC computation).  On success *ctx
 * receives the SPNEGO context and *tokflag requests an initial token.
 */
static OM_uint32
init_ctx_new(OM_uint32 *minor_status,
	     spnego_gss_cred_id_t spcred,
	     gss_ctx_id_t *ctx,
	     send_token_flag *tokflag)
{
	OM_uint32 ret;
	spnego_gss_ctx_id_t sc = NULL;
	sc = create_spnego_ctx();
	if (sc == NULL)
		return GSS_S_FAILURE;
	/* determine negotiation mech set */
	ret = get_negotiable_mechs(minor_status, spcred, GSS_C_INITIATE,
				   &sc->mech_set);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;
	/* Set an initial internal mech to make the first context token. */
	sc->internal_mech = &sc->mech_set->elements[0];
	if (put_mech_set(sc->mech_set, &sc->DER_mechTypes) < 0) {
		ret = GSS_S_FAILURE;
		goto cleanup;
	}
	/*
	 * The actual context is not yet determined, set the output
	 * context handle to refer to the spnego context itself.
	 */
	sc->ctx_handle = GSS_C_NO_CONTEXT;
	*ctx = (gss_ctx_id_t)sc;
	sc = NULL;
	*tokflag = INIT_TOKEN_SEND;
	ret = GSS_S_CONTINUE_NEEDED;
cleanup:
	/* sc is NULL on success, so this only fires on error paths. */
	release_spnego_ctx(&sc);
	return ret;
}
/*
 * Called by second and later calls to spnego_gss_init_sec_context()
 * to decode reply and update state.
 *
 * Parses the acceptor's NegTokenResp from buf into *responseToken and
 * *mechListMIC (both caller-released), then either runs the one-time
 * mechanism-negotiation step (init_ctx_nego) or sanity-checks token
 * presence against the mech-completion state.
 */
static OM_uint32
init_ctx_cont(OM_uint32 *minor_status, gss_ctx_id_t *ctx, gss_buffer_t buf,
	      gss_buffer_t *responseToken, gss_buffer_t *mechListMIC,
	      OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret, tmpmin, acc_negState;
	unsigned char *ptr;
	spnego_gss_ctx_id_t sc;
	gss_OID supportedMech = GSS_C_NO_OID;
	sc = (spnego_gss_ctx_id_t)*ctx;
	*negState = REJECT;
	*tokflag = ERROR_TOKEN_SEND;
	ptr = buf->value;
	ret = get_negTokenResp(minor_status, ptr, buf->length,
			       &acc_negState, &supportedMech,
			       responseToken, mechListMIC);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;
	if (acc_negState == REJECT) {
		/* Acceptor explicitly refused; no error token to send. */
		*minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
		map_errcode(minor_status);
		*tokflag = NO_TOKEN_SEND;
		ret = GSS_S_FAILURE;
		goto cleanup;
	}
	/*
	 * nego_done is false for the first call to init_ctx_cont()
	 */
	if (!sc->nego_done) {
		ret = init_ctx_nego(minor_status, sc,
				    acc_negState,
				    supportedMech, responseToken,
				    mechListMIC,
				    negState, tokflag);
	} else if ((!sc->mech_complete && *responseToken == GSS_C_NO_BUFFER) ||
		   (sc->mech_complete && *responseToken != GSS_C_NO_BUFFER)) {
		/* Missing or spurious token from acceptor. */
		ret = GSS_S_DEFECTIVE_TOKEN;
	} else if (!sc->mech_complete ||
		   (sc->mic_reqd &&
		    (sc->ctx_flags & GSS_C_INTEG_FLAG))) {
		/* Not obviously done; we may decide we're done later in
		 * init_ctx_call_init or handle_mic. */
		*negState = ACCEPT_INCOMPLETE;
		*tokflag = CONT_TOKEN_SEND;
		ret = GSS_S_CONTINUE_NEEDED;
	} else {
		/* mech finished on last pass and no MIC required, so done. */
		*negState = ACCEPT_COMPLETE;
		*tokflag = NO_TOKEN_SEND;
		ret = GSS_S_COMPLETE;
	}
cleanup:
	if (supportedMech != GSS_C_NO_OID)
		generic_gss_release_oid(&tmpmin, &supportedMech);
	return ret;
}
/*
 * Consistency checking and mechanism negotiation handling for second
 * call of spnego_gss_init_sec_context(). Call init_ctx_reselect() to
 * update internal state if acceptor has counter-proposed.
 *
 * supportedMech is the acceptor's chosen mech OID (required here);
 * acc_negState is the acceptor's reported negotiation state.  Sets
 * sc->nego_done so this path runs only once per negotiation.
 */
static OM_uint32
init_ctx_nego(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc,
	      OM_uint32 acc_negState, gss_OID supportedMech,
	      gss_buffer_t *responseToken, gss_buffer_t *mechListMIC,
	      OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret;
	*negState = REJECT;
	*tokflag = ERROR_TOKEN_SEND;
	ret = GSS_S_DEFECTIVE_TOKEN;
	/*
	 * Both supportedMech and negState must be present in first
	 * acceptor token.
	 */
	if (supportedMech == GSS_C_NO_OID) {
		*minor_status = ERR_SPNEGO_NO_MECH_FROM_ACCEPTOR;
		map_errcode(minor_status);
		return GSS_S_DEFECTIVE_TOKEN;
	}
	if (acc_negState == ACCEPT_DEFECTIVE_TOKEN) {
		*minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
		map_errcode(minor_status);
		return GSS_S_DEFECTIVE_TOKEN;
	}
	/*
	 * If the mechanism we sent is not the mechanism returned from
	 * the server, we need to handle the server's counter
	 * proposal. There is a bug in SAMBA servers that always send
	 * the old Kerberos mech OID, even though we sent the new one.
	 * So we will treat all the Kerberos mech OIDS as the same.
	 */
	if (!(is_kerb_mech(supportedMech) &&
	      is_kerb_mech(sc->internal_mech)) &&
	    !g_OID_equal(supportedMech, sc->internal_mech)) {
		ret = init_ctx_reselect(minor_status, sc,
					acc_negState, supportedMech,
					responseToken, mechListMIC,
					negState, tokflag);
	} else if (*responseToken == GSS_C_NO_BUFFER) {
		if (sc->mech_complete) {
			/*
			 * Mech completed on first call to its
			 * init_sec_context().  Acceptor sends no mech
			 * token.
			 */
			*negState = ACCEPT_COMPLETE;
			*tokflag = NO_TOKEN_SEND;
			ret = GSS_S_COMPLETE;
		} else {
			/*
			 * Reject missing mech token when optimistic
			 * mech selected.
			 */
			*minor_status = ERR_SPNEGO_NO_TOKEN_FROM_ACCEPTOR;
			map_errcode(minor_status);
			ret = GSS_S_DEFECTIVE_TOKEN;
		}
	} else if ((*responseToken)->length == 0 && sc->mech_complete) {
		/* Handle old IIS servers returning empty token instead of
		 * null tokens in the non-mutual auth case. */
		*negState = ACCEPT_COMPLETE;
		*tokflag = NO_TOKEN_SEND;
		ret = GSS_S_COMPLETE;
	} else if (sc->mech_complete) {
		/* Reject spurious mech token. */
		ret = GSS_S_DEFECTIVE_TOKEN;
	} else {
		/* Continue the selected mech's token exchange. */
		*negState = ACCEPT_INCOMPLETE;
		*tokflag = CONT_TOKEN_SEND;
		ret = GSS_S_CONTINUE_NEEDED;
	}
	sc->nego_done = 1;
	return ret;
}
/*
 * Handle acceptor's counter-proposal of an alternative mechanism.
 *
 * Discards the optimistic mech's partially built context, switches
 * sc->internal_mech to the acceptor's choice (which must appear in our
 * offered mech_set), and requires a mechListMIC exchange since the
 * negotiation was not won by our first choice.
 */
static OM_uint32
init_ctx_reselect(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc,
		  OM_uint32 acc_negState, gss_OID supportedMech,
		  gss_buffer_t *responseToken, gss_buffer_t *mechListMIC,
		  OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 tmpmin;
	size_t i;
	/* Drop the context built for the previously selected mech. */
	gss_delete_sec_context(&tmpmin, &sc->ctx_handle,
			       GSS_C_NO_BUFFER);
	/* Find supportedMech in sc->mech_set. */
	for (i = 0; i < sc->mech_set->count; i++) {
		if (g_OID_equal(supportedMech, &sc->mech_set->elements[i]))
			break;
	}
	if (i == sc->mech_set->count)
		return GSS_S_DEFECTIVE_TOKEN;
	sc->internal_mech = &sc->mech_set->elements[i];
	/*
	 * Windows 2003 and earlier don't correctly send a
	 * negState of request-mic when counter-proposing a
	 * mechanism.  They probably don't handle mechListMICs
	 * properly either.
	 */
	if (acc_negState != REQUEST_MIC)
		return GSS_S_DEFECTIVE_TOKEN;
	sc->mech_complete = 0;
	sc->mic_reqd = 1;
	*negState = REQUEST_MIC;
	*tokflag = CONT_TOKEN_SEND;
	return GSS_S_CONTINUE_NEEDED;
}
/*
 * Wrap call to mechanism gss_init_sec_context() and update state
 * accordingly.
 *
 * Always requests GSS_C_INTEG_FLAG from the mech so that a mechListMIC
 * can be produced if the negotiation requires one.  If the very first
 * (optimistic) mech errors out, removes it from sc->mech_set and retries
 * recursively with the next mech; the original error is reported if all
 * mechs fail.
 */
static OM_uint32
init_ctx_call_init(OM_uint32 *minor_status,
		   spnego_gss_ctx_id_t sc,
		   spnego_gss_cred_id_t spcred,
		   gss_name_t target_name,
		   OM_uint32 req_flags,
		   OM_uint32 time_req,
		   gss_buffer_t mechtok_in,
		   gss_OID *actual_mech,
		   gss_buffer_t mechtok_out,
		   OM_uint32 *ret_flags,
		   OM_uint32 *time_rec,
		   OM_uint32 *negState,
		   send_token_flag *send_token)
{
	OM_uint32 ret, tmpret, tmpmin;
	gss_cred_id_t mcred;
	mcred = (spcred == NULL) ? GSS_C_NO_CREDENTIAL : spcred->mcred;
	ret = gss_init_sec_context(minor_status,
				   mcred,
				   &sc->ctx_handle,
				   target_name,
				   sc->internal_mech,
				   (req_flags | GSS_C_INTEG_FLAG),
				   time_req,
				   GSS_C_NO_CHANNEL_BINDINGS,
				   mechtok_in,
				   &sc->actual_mech,
				   mechtok_out,
				   &sc->ctx_flags,
				   time_rec);
	if (ret == GSS_S_COMPLETE) {
		sc->mech_complete = 1;
		if (ret_flags != NULL)
			*ret_flags = sc->ctx_flags;
		/*
		 * Microsoft SPNEGO implementations expect an even number of
		 * token exchanges.  So if we're sending a final token, ask for
		 * a zero-length token back from the server.  Also ask for a
		 * token back if this is the first token or if a MIC exchange
		 * is required.
		 */
		if (*send_token == CONT_TOKEN_SEND &&
		    mechtok_out->length == 0 &&
		    (!sc->mic_reqd ||
		     !(sc->ctx_flags & GSS_C_INTEG_FLAG))) {
			/* The exchange is complete. */
			*negState = ACCEPT_COMPLETE;
			ret = GSS_S_COMPLETE;
			*send_token = NO_TOKEN_SEND;
		} else {
			/* Ask for one more hop. */
			*negState = ACCEPT_INCOMPLETE;
			ret = GSS_S_CONTINUE_NEEDED;
		}
		return ret;
	}
	if (ret == GSS_S_CONTINUE_NEEDED)
		return ret;
	if (*send_token != INIT_TOKEN_SEND) {
		/* Mid-exchange mech failure: emit an error token. */
		*send_token = ERROR_TOKEN_SEND;
		*negState = REJECT;
		return ret;
	}
	/*
	 * Since this is the first token, we can fall back to later mechanisms
	 * in the list.  Since the mechanism list is expected to be short, we
	 * can do this with recursion.  If all mechanisms produce errors, the
	 * caller should get the error from the first mech in the list.
	 */
	gssalloc_free(sc->mech_set->elements->elements);
	memmove(sc->mech_set->elements, sc->mech_set->elements + 1,
		--sc->mech_set->count * sizeof(*sc->mech_set->elements));
	if (sc->mech_set->count == 0)
		goto fail;
	/* Re-encode the shrunken mech list; the mechListMIC is computed
	 * over this DER encoding. */
	gss_release_buffer(&tmpmin, &sc->DER_mechTypes);
	if (put_mech_set(sc->mech_set, &sc->DER_mechTypes) < 0)
		goto fail;
	tmpret = init_ctx_call_init(&tmpmin, sc, spcred, target_name,
				    req_flags, time_req, mechtok_in,
				    actual_mech, mechtok_out, ret_flags,
				    time_rec, negState, send_token);
	if (HARD_ERROR(tmpret))
		goto fail;
	*minor_status = tmpmin;
	return tmpret;
fail:
	/* Don't output token on error from first call. */
	*send_token = NO_TOKEN_SEND;
	*negState = REJECT;
	return ret;
}
/*ARGSUSED*/
/*
 * SPNEGO initiator entry point.  Drives the three-phase negotiation
 * described in the block comment below, wrapping the selected mech's
 * init_sec_context and emitting NegTokenInit/NegTokenResp messages.
 * On GSS_S_COMPLETE, *context_handle is replaced by the negotiated
 * mech's context and the SPNEGO wrapper is released.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_init_sec_context(
			OM_uint32 *minor_status,
			gss_cred_id_t claimant_cred_handle,
			gss_ctx_id_t *context_handle,
			gss_name_t target_name,
			gss_OID mech_type,
			OM_uint32 req_flags,
			OM_uint32 time_req,
			gss_channel_bindings_t input_chan_bindings,
			gss_buffer_t input_token,
			gss_OID *actual_mech,
			gss_buffer_t output_token,
			OM_uint32 *ret_flags,
			OM_uint32 *time_rec)
{
	send_token_flag send_token = NO_TOKEN_SEND;
	OM_uint32 tmpmin, ret, negState;
	gss_buffer_t mechtok_in, mechListMIC_in, mechListMIC_out;
	gss_buffer_desc mechtok_out = GSS_C_EMPTY_BUFFER;
	spnego_gss_cred_id_t spcred = NULL;
	spnego_gss_ctx_id_t spnego_ctx = NULL;
	dsyslog("Entering init_sec_context\n");
	mechtok_in = mechListMIC_out = mechListMIC_in = GSS_C_NO_BUFFER;
	negState = REJECT;
	/*
	 * This function works in three steps:
	 *
	 *   1. Perform mechanism negotiation.
	 *   2. Invoke the negotiated or optimistic mech's gss_init_sec_context
	 *      function and examine the results.
	 *   3. Process or generate MICs if necessary.
	 *
	 * The three steps share responsibility for determining when the
	 * exchange is complete.  If the selected mech completed in a previous
	 * call and no MIC exchange is expected, then step 1 will decide.  If
	 * the selected mech completes in this call and no MIC exchange is
	 * expected, then step 2 will decide.  If a MIC exchange is expected,
	 * then step 3 will decide.  If an error occurs in any step, the
	 * exchange will be aborted, possibly with an error token.
	 *
	 * negState determines the state of the negotiation, and is
	 * communicated to the acceptor if a continuing token is sent.
	 * send_token is used to indicate what type of token, if any, should be
	 * generated.
	 */
	/* Validate arguments. */
	if (minor_status != NULL)
		*minor_status = 0;
	if (output_token != GSS_C_NO_BUFFER) {
		output_token->length = 0;
		output_token->value = NULL;
	}
	if (minor_status == NULL ||
	    output_token == GSS_C_NO_BUFFER ||
	    context_handle == NULL)
		return GSS_S_CALL_INACCESSIBLE_WRITE;
	if (actual_mech != NULL)
		*actual_mech = GSS_C_NO_OID;
	/* Step 1: perform mechanism negotiation. */
	spcred = (spnego_gss_cred_id_t)claimant_cred_handle;
	if (*context_handle == GSS_C_NO_CONTEXT) {
		ret = init_ctx_new(minor_status, spcred,
				   context_handle, &send_token);
		if (ret != GSS_S_CONTINUE_NEEDED) {
			goto cleanup;
		}
	} else {
		ret = init_ctx_cont(minor_status, context_handle,
				    input_token, &mechtok_in,
				    &mechListMIC_in, &negState, &send_token);
		if (HARD_ERROR(ret)) {
			goto cleanup;
		}
	}
	/* Step 2: invoke the selected or optimistic mechanism's
	 * gss_init_sec_context function, if it didn't complete previously. */
	spnego_ctx = (spnego_gss_ctx_id_t)*context_handle;
	if (!spnego_ctx->mech_complete) {
		ret = init_ctx_call_init(
			minor_status, spnego_ctx, spcred,
			target_name, req_flags,
			time_req, mechtok_in,
			actual_mech, &mechtok_out,
			ret_flags, time_rec,
			&negState, &send_token);
		/* Give the mechanism a chance to force a mechlistMIC. */
		if (!HARD_ERROR(ret) && mech_requires_mechlistMIC(spnego_ctx))
			spnego_ctx->mic_reqd = 1;
	}
	/* Step 3: process or generate the MIC, if the negotiated mech is
	 * complete and supports MICs. */
	if (!HARD_ERROR(ret) && spnego_ctx->mech_complete &&
	    (spnego_ctx->ctx_flags & GSS_C_INTEG_FLAG)) {
		ret = handle_mic(minor_status,
				 mechListMIC_in,
				 (mechtok_out.length != 0),
				 spnego_ctx, &mechListMIC_out,
				 &negState, &send_token);
	}
cleanup:
	if (send_token == INIT_TOKEN_SEND) {
		if (make_spnego_tokenInit_msg(spnego_ctx,
					      0,
					      mechListMIC_out,
					      req_flags,
					      &mechtok_out, send_token,
					      output_token) < 0) {
			ret = GSS_S_FAILURE;
		}
	} else if (send_token != NO_TOKEN_SEND) {
		if (make_spnego_tokenTarg_msg(negState, GSS_C_NO_OID,
					      &mechtok_out, mechListMIC_out,
					      send_token,
					      output_token) < 0) {
			ret = GSS_S_FAILURE;
		}
	}
	gss_release_buffer(&tmpmin, &mechtok_out);
	if (ret == GSS_S_COMPLETE) {
		/*
		 * Now, switch the output context to refer to the
		 * negotiated mechanism's context.
		 */
		*context_handle = (gss_ctx_id_t)spnego_ctx->ctx_handle;
		if (actual_mech != NULL)
			*actual_mech = spnego_ctx->actual_mech;
		if (ret_flags != NULL)
			*ret_flags = spnego_ctx->ctx_flags;
		release_spnego_ctx(&spnego_ctx);
	} else if (ret != GSS_S_CONTINUE_NEEDED) {
		/* Hard error: tear everything down. */
		if (spnego_ctx != NULL) {
			gss_delete_sec_context(&tmpmin,
					       &spnego_ctx->ctx_handle,
					       GSS_C_NO_BUFFER);
			release_spnego_ctx(&spnego_ctx);
		}
		*context_handle = GSS_C_NO_CONTEXT;
	}
	/* The parsed sub-buffers from init_ctx_cont are heap descriptors
	 * owned here; release both contents and descriptors. */
	if (mechtok_in != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mechtok_in);
		free(mechtok_in);
	}
	if (mechListMIC_in != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mechListMIC_in);
		free(mechListMIC_in);
	}
	if (mechListMIC_out != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mechListMIC_out);
		free(mechListMIC_out);
	}
	return ret;
} /* init_sec_context */
/* We don't want to import KRB5 headers here */
/* 1.2.840.113554.1.2.2 — the canonical krb5 mechanism OID. */
static const gss_OID_desc gss_mech_krb5_oid =
	{ 9, "\052\206\110\206\367\022\001\002\002" };
/* Microsoft's historically mis-encoded krb5 OID (one arc byte differs);
 * some Windows implementations emit it and it is treated as krb5. */
static const gss_OID_desc gss_mech_krb5_wrong_oid =
	{ 9, "\052\206\110\202\367\022\001\002\002" };
/*
 * verify that the input token length is not 0. If it is, just return.
 * If the token length is greater than 0, der encode as a sequence
 * and place in buf_out, advancing buf_out.
 *
 * Returns 0 on success (including the empty-token no-op case) or a
 * nonzero DER-encoding error.  buflen is the space remaining at
 * *buf_out; the caller must have sized the buffer to fit the encoding.
 */
static int
put_neg_hints(unsigned char **buf_out, gss_buffer_t input_token,
	      unsigned int buflen)
{
	int ret;
	/* if token length is 0, we do not want to send */
	if (input_token->length == 0)
		return (0);
	if (input_token->length > buflen)
		return (-1);
	*(*buf_out)++ = SEQUENCE;
	/* NOTE(review): the third argument here is the max-length bound for
	 * the DER length encoding; passing input_token->length rather than
	 * the remaining buffer space looks intentional but is worth
	 * confirming against gssint_put_der_length's contract. */
	if ((ret = gssint_put_der_length(input_token->length, buf_out,
					 input_token->length)))
		return (ret);
	TWRITE_STR(*buf_out, input_token->value, input_token->length);
	return (0);
}
/*
 * NegHints ::= SEQUENCE {
 *    hintName [0] GeneralString OPTIONAL,
 *    hintAddress [1] OCTET STRING OPTIONAL
 * }
 */
#define HOST_PREFIX	"host@"
#define HOST_PREFIX_LEN	(sizeof(HOST_PREFIX) - 1)

/*
 * Build the DER-encoded NegHints hintName for a NegTokenInit2 reply.
 *
 * The hint name comes from the acceptor credential if one is available,
 * otherwise from "host@<local hostname>".  On success *outbuf receives a
 * heap-allocated buffer descriptor owning the encoding; the caller must
 * release it.  On failure *outbuf is GSS_C_NO_BUFFER.
 */
static int
make_NegHints(OM_uint32 *minor_status,
	      spnego_gss_cred_id_t spcred, gss_buffer_t *outbuf)
{
	gss_buffer_desc hintNameBuf;
	gss_name_t hintName = GSS_C_NO_NAME;
	gss_name_t hintKerberosName;
	gss_OID hintNameType;
	OM_uint32 major_status;
	OM_uint32 minor;
	unsigned int tlen = 0;
	unsigned int hintNameSize = 0;
	unsigned char *ptr;
	unsigned char *t;
	*outbuf = GSS_C_NO_BUFFER;
	if (spcred != NULL) {
		major_status = gss_inquire_cred(minor_status,
						spcred->mcred,
						&hintName,
						NULL,
						NULL,
						NULL);
		if (major_status != GSS_S_COMPLETE)
			return (major_status);
	}
	if (hintName == GSS_C_NO_NAME) {
		krb5_error_code code;
		krb5int_access kaccess;
		char hostname[HOST_PREFIX_LEN + MAXHOSTNAMELEN + 1] = HOST_PREFIX;
		code = krb5int_accessor(&kaccess, KRB5INT_ACCESS_VERSION);
		if (code != 0) {
			*minor_status = code;
			return (GSS_S_FAILURE);
		}
		/* this breaks mutual authentication but Samba relies on it */
		code = (*kaccess.clean_hostname)(NULL, NULL,
						 &hostname[HOST_PREFIX_LEN],
						 MAXHOSTNAMELEN);
		if (code != 0) {
			*minor_status = code;
			return (GSS_S_FAILURE);
		}
		hintNameBuf.value = hostname;
		hintNameBuf.length = strlen(hostname);
		major_status = gss_import_name(minor_status,
					       &hintNameBuf,
					       GSS_C_NT_HOSTBASED_SERVICE,
					       &hintName);
		if (major_status != GSS_S_COMPLETE) {
			return (major_status);
		}
	}
	hintNameBuf.value = NULL;
	hintNameBuf.length = 0;
	major_status = gss_canonicalize_name(minor_status,
					     hintName,
					     (gss_OID)&gss_mech_krb5_oid,
					     &hintKerberosName);
	if (major_status != GSS_S_COMPLETE) {
		gss_release_name(&minor, &hintName);
		return (major_status);
	}
	gss_release_name(&minor, &hintName);
	major_status = gss_display_name(minor_status,
					hintKerberosName,
					&hintNameBuf,
					&hintNameType);
	if (major_status != GSS_S_COMPLETE) {
		/* Release the canonicalized name; hintName was already
		 * released above (releasing it again here was a no-op and
		 * leaked hintKerberosName). */
		gss_release_name(&minor, &hintKerberosName);
		return (major_status);
	}
	gss_release_name(&minor, &hintKerberosName);
	/*
	 * Now encode the name hint into a NegHints ASN.1 type
	 */
	major_status = GSS_S_FAILURE;
	/* Length of DER encoded GeneralString */
	tlen = 1 + gssint_der_length_size(hintNameBuf.length) +
		hintNameBuf.length;
	hintNameSize = tlen;
	/* Length of DER encoded hintName */
	tlen += 1 + gssint_der_length_size(hintNameSize);
	t = gssalloc_malloc(tlen);
	if (t == NULL) {
		*minor_status = ENOMEM;
		goto errout;
	}
	ptr = t;
	*ptr++ = CONTEXT | 0x00; /* hintName identifier */
	if (gssint_put_der_length(hintNameSize,
				  &ptr, tlen - (int)(ptr-t)))
		goto errout;
	*ptr++ = GENERAL_STRING;
	if (gssint_put_der_length(hintNameBuf.length,
				  &ptr, tlen - (int)(ptr-t)))
		goto errout;
	memcpy(ptr, hintNameBuf.value, hintNameBuf.length);
	ptr += hintNameBuf.length;
	*outbuf = (gss_buffer_t)malloc(sizeof(gss_buffer_desc));
	if (*outbuf == NULL) {
		*minor_status = ENOMEM;
		goto errout;
	}
	(*outbuf)->value = (void *)t;
	(*outbuf)->length = ptr - t;
	t = NULL; /* don't free */
	*minor_status = 0;
	major_status = GSS_S_COMPLETE;
errout:
	if (t != NULL) {
		free(t);
	}
	gss_release_buffer(&minor, &hintNameBuf);
	return (major_status);
}
/*
 * Support the Microsoft NegHints extension to SPNEGO for compatibility with
 * some versions of Samba. See:
 *   http://msdn.microsoft.com/en-us/library/cc247039(PROT.10).aspx
 *
 * Builds a hint-bearing NegTokenInit2 reply before any initiator token
 * has been processed.  On success *ctx receives a fresh SPNEGO context
 * (internal_mech still unselected) and *mechListMIC carries the encoded
 * NegHints, which the caller must release.
 */
static OM_uint32
acc_ctx_hints(OM_uint32 *minor_status,
	      gss_ctx_id_t *ctx,
	      spnego_gss_cred_id_t spcred,
	      gss_buffer_t *mechListMIC,
	      OM_uint32 *negState,
	      send_token_flag *return_token)
{
	OM_uint32 tmpmin, ret;
	gss_OID_set supported_mechSet;
	spnego_gss_ctx_id_t sc = NULL;
	*mechListMIC = GSS_C_NO_BUFFER;
	supported_mechSet = GSS_C_NO_OID_SET;
	*return_token = NO_TOKEN_SEND;
	*negState = REJECT;
	*minor_status = 0;
	/* A hint request must be the first token received. */
	if (*ctx != GSS_C_NO_CONTEXT)
		return GSS_S_DEFECTIVE_TOKEN;
	ret = get_negotiable_mechs(minor_status, spcred, GSS_C_ACCEPT,
				   &supported_mechSet);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;
	ret = make_NegHints(minor_status, spcred, mechListMIC);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;
	sc = create_spnego_ctx();
	if (sc == NULL) {
		ret = GSS_S_FAILURE;
		goto cleanup;
	}
	/* DER-encode the acceptable mech list for the reply token. */
	if (put_mech_set(supported_mechSet, &sc->DER_mechTypes) < 0) {
		ret = GSS_S_FAILURE;
		goto cleanup;
	}
	/* No mech has been negotiated yet. */
	sc->internal_mech = GSS_C_NO_OID;
	*negState = ACCEPT_INCOMPLETE;
	*return_token = INIT_TOKEN_SEND;
	sc->firstpass = 1;
	*ctx = (gss_ctx_id_t)sc;
	sc = NULL;
	ret = GSS_S_COMPLETE;
cleanup:
	release_spnego_ctx(&sc);
	gss_release_oid_set(&tmpmin, &supported_mechSet);
	return ret;
}
/*
 * Set negState to REJECT if the token is defective, else
 * ACCEPT_INCOMPLETE or REQUEST_MIC, depending on whether initiator's
 * preferred mechanism is supported.
 *
 * Parses the initiator's NegTokenInit, intersects the offered mech list
 * with our acceptable mechs, and populates (or re-populates, when a
 * hints context from acc_ctx_hints already exists in *ctx) the SPNEGO
 * context.  mechToken/mechListMIC are heap descriptors released by the
 * caller.
 */
static OM_uint32
acc_ctx_new(OM_uint32 *minor_status,
	    gss_buffer_t buf,
	    gss_ctx_id_t *ctx,
	    spnego_gss_cred_id_t spcred,
	    gss_buffer_t *mechToken,
	    gss_buffer_t *mechListMIC,
	    OM_uint32 *negState,
	    send_token_flag *return_token)
{
	OM_uint32 tmpmin, ret, req_flags;
	gss_OID_set supported_mechSet, mechTypes;
	gss_buffer_desc der_mechTypes;
	gss_OID mech_wanted;
	spnego_gss_ctx_id_t sc = NULL;
	ret = GSS_S_DEFECTIVE_TOKEN;
	der_mechTypes.length = 0;
	der_mechTypes.value = NULL;
	*mechToken = *mechListMIC = GSS_C_NO_BUFFER;
	supported_mechSet = mechTypes = GSS_C_NO_OID_SET;
	*return_token = ERROR_TOKEN_SEND;
	*negState = REJECT;
	*minor_status = 0;
	ret = get_negTokenInit(minor_status, buf, &der_mechTypes,
			       &mechTypes, &req_flags,
			       mechToken, mechListMIC);
	if (ret != GSS_S_COMPLETE) {
		goto cleanup;
	}
	ret = get_negotiable_mechs(minor_status, spcred, GSS_C_ACCEPT,
				   &supported_mechSet);
	if (ret != GSS_S_COMPLETE) {
		*return_token = NO_TOKEN_SEND;
		goto cleanup;
	}
	/*
	 * Select the best match between the list of mechs
	 * that the initiator requested and the list that
	 * the acceptor will support.
	 */
	mech_wanted = negotiate_mech(supported_mechSet, mechTypes, negState);
	if (*negState == REJECT) {
		ret = GSS_S_BAD_MECH;
		goto cleanup;
	}
	sc = (spnego_gss_ctx_id_t)*ctx;
	if (sc != NULL) {
		/* Reuse the context created by acc_ctx_hints(); drop its
		 * provisional DER mech list before installing ours. */
		gss_release_buffer(&tmpmin, &sc->DER_mechTypes);
		assert(mech_wanted != GSS_C_NO_OID);
	} else
		sc = create_spnego_ctx();
	if (sc == NULL) {
		ret = GSS_S_FAILURE;
		*return_token = NO_TOKEN_SEND;
		goto cleanup;
	}
	/* Transfer ownership of the parsed mech list and its encoding. */
	sc->mech_set = mechTypes;
	mechTypes = GSS_C_NO_OID_SET;
	sc->internal_mech = mech_wanted;
	sc->DER_mechTypes = der_mechTypes;
	der_mechTypes.length = 0;
	der_mechTypes.value = NULL;
	if (*negState == REQUEST_MIC)
		sc->mic_reqd = 1;
	*return_token = INIT_TOKEN_SEND;
	sc->firstpass = 1;
	*ctx = (gss_ctx_id_t)sc;
	ret = GSS_S_COMPLETE;
cleanup:
	gss_release_oid_set(&tmpmin, &mechTypes);
	gss_release_oid_set(&tmpmin, &supported_mechSet);
	if (der_mechTypes.length != 0)
		gss_release_buffer(&tmpmin, &der_mechTypes);
	return ret;
}
/*
 * Parse a continuation NegTokenResp from the initiator.  Extracts the
 * responseToken and/or mechListMIC (heap descriptors released by the
 * caller).  A continuation token must not carry a supportedMech, and
 * must carry at least one of responseToken/mechListMIC.
 */
static OM_uint32
acc_ctx_cont(OM_uint32 *minstat,
	     gss_buffer_t buf,
	     gss_ctx_id_t *ctx,
	     gss_buffer_t *responseToken,
	     gss_buffer_t *mechListMIC,
	     OM_uint32 *negState,
	     send_token_flag *return_token)
{
	OM_uint32 ret, tmpmin;
	gss_OID supportedMech;
	spnego_gss_ctx_id_t sc;
	unsigned int len;
	unsigned char *ptr, *bufstart;
	sc = (spnego_gss_ctx_id_t)*ctx;
	ret = GSS_S_DEFECTIVE_TOKEN;
	*negState = REJECT;
	*minstat = 0;
	supportedMech = GSS_C_NO_OID;
	*return_token = ERROR_TOKEN_SEND;
	*responseToken = *mechListMIC = GSS_C_NO_BUFFER;
	ptr = bufstart = buf->value;
#define REMAIN (buf->length - (ptr - bufstart))
	/* Reject empty tokens before dereferencing *ptr below: a
	 * zero-length buffer may have a null value pointer, and the parser
	 * needs at least one byte.  Also bound the length for the
	 * int-typed DER routines. */
	if (REMAIN == 0 || REMAIN > INT_MAX)
		return GSS_S_DEFECTIVE_TOKEN;
	/*
	 * Attempt to work with old Sun SPNEGO.
	 */
	if (*ptr == HEADER_ID) {
		ret = g_verify_token_header(gss_mech_spnego,
					    &len, &ptr, 0, REMAIN);
		if (ret) {
			*minstat = ret;
			return GSS_S_DEFECTIVE_TOKEN;
		}
	}
	if (*ptr != (CONTEXT | 0x01)) {
		return GSS_S_DEFECTIVE_TOKEN;
	}
	ret = get_negTokenResp(minstat, ptr, REMAIN,
			       negState, &supportedMech,
			       responseToken, mechListMIC);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;
	if (*responseToken == GSS_C_NO_BUFFER &&
	    *mechListMIC == GSS_C_NO_BUFFER) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto cleanup;
	}
	if (supportedMech != GSS_C_NO_OID) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto cleanup;
	}
	sc->firstpass = 0;
	*negState = ACCEPT_INCOMPLETE;
	*return_token = CONT_TOKEN_SEND;
cleanup:
	if (supportedMech != GSS_C_NO_OID) {
		generic_gss_release_oid(&tmpmin, &supportedMech);
	}
	return ret;
#undef REMAIN
}
/*
 * Verify that mech OID is either exactly the same as the negotiated
 * mech OID, or is a mech OID supported by the negotiated mech. MS
 * implementations can list a most preferred mech using an incorrect
 * krb5 OID while emitting a krb5 initiator mech token having the
 * correct krb5 mech OID.
 *
 * Returns GSS_S_COMPLETE when mechoid is acceptable, GSS_S_BAD_MECH
 * (with negState/tokflag set for an error token) when it is not.
 */
static OM_uint32
acc_ctx_vfy_oid(OM_uint32 *minor_status,
		spnego_gss_ctx_id_t sc, gss_OID mechoid,
		OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret, tmpmin;
	gss_mechanism mech = NULL;
	gss_OID_set mech_set = GSS_C_NO_OID_SET;
	int present = 0;
	/* Exact match: nothing further to check. */
	if (g_OID_equal(sc->internal_mech, mechoid))
		return GSS_S_COMPLETE;
	mech = gssint_get_mechanism(sc->internal_mech);
	if (mech == NULL || mech->gss_indicate_mechs == NULL) {
		*minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
		map_errcode(minor_status);
		*negState = REJECT;
		*tokflag = ERROR_TOKEN_SEND;
		return GSS_S_BAD_MECH;
	}
	/* Ask the negotiated mech which OIDs it answers to. */
	ret = mech->gss_indicate_mechs(minor_status, &mech_set);
	if (ret != GSS_S_COMPLETE) {
		*tokflag = NO_TOKEN_SEND;
		map_error(minor_status, mech);
		goto cleanup;
	}
	ret = gss_test_oid_set_member(minor_status, mechoid,
				      mech_set, &present);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;
	if (!present) {
		*minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
		map_errcode(minor_status);
		*negState = REJECT;
		*tokflag = ERROR_TOKEN_SEND;
		ret = GSS_S_BAD_MECH;
	}
cleanup:
	gss_release_oid_set(&tmpmin, &mech_set);
	return ret;
}
#ifndef LEAN_CLIENT
/*
* Wrap call to gss_accept_sec_context() and update state
* accordingly.
*/
static OM_uint32
acc_ctx_call_acc(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc,
		 spnego_gss_cred_id_t spcred, gss_buffer_t mechtok_in,
		 gss_OID *mech_type, gss_buffer_t mechtok_out,
		 OM_uint32 *ret_flags, OM_uint32 *time_rec,
		 gss_cred_id_t *delegated_cred_handle,
		 OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret;
	gss_OID_desc mechoid;
	gss_cred_id_t mcred;

	if (sc->ctx_handle == GSS_C_NO_CONTEXT) {
		/*
		 * First inner-mech token: check that its embedded OID is
		 * (or aliases) the negotiated mech before dispatching.
		 * mechoid is an alias; don't free it.
		 */
		ret = gssint_get_mech_type(&mechoid, mechtok_in);
		if (ret != GSS_S_COMPLETE) {
			*tokflag = NO_TOKEN_SEND;
			return ret;
		}
		ret = acc_ctx_vfy_oid(minor_status, sc, &mechoid,
				      negState, tokflag);
		if (ret != GSS_S_COMPLETE)
			return ret;
	}

	/* Unwrap the SPNEGO credential down to the mechglue credential. */
	mcred = (spcred == NULL) ? GSS_C_NO_CREDENTIAL : spcred->mcred;
	ret = gss_accept_sec_context(minor_status,
				     &sc->ctx_handle,
				     mcred,
				     mechtok_in,
				     GSS_C_NO_CHANNEL_BINDINGS,
				     &sc->internal_name,
				     mech_type,
				     mechtok_out,
				     &sc->ctx_flags,
				     time_rec,
				     delegated_cred_handle);
	if (ret == GSS_S_COMPLETE) {
#ifdef MS_BUG_TEST
		/*
		 * Force MIC to be not required even if we previously
		 * requested a MIC.
		 */
		char *envstr = getenv("MS_FORCE_NO_MIC");

		if (envstr != NULL && strcmp(envstr, "1") == 0 &&
		    !(sc->ctx_flags & GSS_C_MUTUAL_FLAG) &&
		    sc->mic_reqd) {
			sc->mic_reqd = 0;
		}
#endif
		sc->mech_complete = 1;
		if (ret_flags != NULL)
			*ret_flags = sc->ctx_flags;

		if (!sc->mic_reqd ||
		    !(sc->ctx_flags & GSS_C_INTEG_FLAG)) {
			/* No MIC exchange required, so we're done. */
			*negState = ACCEPT_COMPLETE;
			ret = GSS_S_COMPLETE;
		} else {
			/* handle_mic will decide if we're done. */
			ret = GSS_S_CONTINUE_NEEDED;
		}
	} else if (ret != GSS_S_CONTINUE_NEEDED) {
		/* Hard mech failure: abort with an SPNEGO error token. */
		*negState = REJECT;
		*tokflag = ERROR_TOKEN_SEND;
	}
	return ret;
}
/*ARGSUSED*/
OM_uint32 KRB5_CALLCONV
spnego_gss_accept_sec_context(
			    OM_uint32 *minor_status,
			    gss_ctx_id_t *context_handle,
			    gss_cred_id_t verifier_cred_handle,
			    gss_buffer_t input_token,
			    gss_channel_bindings_t input_chan_bindings,
			    gss_name_t *src_name,
			    gss_OID *mech_type,
			    gss_buffer_t output_token,
			    OM_uint32 *ret_flags,
			    OM_uint32 *time_rec,
			    gss_cred_id_t *delegated_cred_handle)
{
	OM_uint32 ret, tmpmin, negState;
	send_token_flag return_token;
	gss_buffer_t mechtok_in, mic_in, mic_out;
	gss_buffer_desc mechtok_out = GSS_C_EMPTY_BUFFER;
	spnego_gss_ctx_id_t sc = NULL;
	spnego_gss_cred_id_t spcred = NULL;
	int sendTokenInit = 0, tmpret;

	mechtok_in = mic_in = mic_out = GSS_C_NO_BUFFER;

	/*
	 * This function works in three steps:
	 *
	 *   1. Perform mechanism negotiation.
	 *   2. Invoke the negotiated mech's gss_accept_sec_context function
	 *      and examine the results.
	 *   3. Process or generate MICs if necessary.
	 *
	 * Step one determines whether the negotiation requires a MIC exchange,
	 * while steps two and three share responsibility for determining when
	 * the exchange is complete.  If the selected mech completes in this
	 * call and no MIC exchange is expected, then step 2 will decide.  If a
	 * MIC exchange is expected, then step 3 will decide.  If an error
	 * occurs in any step, the exchange will be aborted, possibly with an
	 * error token.
	 *
	 * negState determines the state of the negotiation, and is
	 * communicated to the acceptor if a continuing token is sent.
	 * return_token is used to indicate what type of token, if any, should
	 * be generated.
	 */

	/* Validate arguments before dereferencing any of them. */
	if (minor_status != NULL)
		*minor_status = 0;
	if (output_token != GSS_C_NO_BUFFER) {
		output_token->length = 0;
		output_token->value = NULL;
	}
	if (minor_status == NULL ||
	    output_token == GSS_C_NO_BUFFER ||
	    context_handle == NULL)
		return GSS_S_CALL_INACCESSIBLE_WRITE;
	if (input_token == GSS_C_NO_BUFFER)
		return GSS_S_CALL_INACCESSIBLE_READ;

	/* Step 1: Perform mechanism negotiation. */
	sc = (spnego_gss_ctx_id_t)*context_handle;
	spcred = (spnego_gss_cred_id_t)verifier_cred_handle;
	if (sc == NULL || sc->internal_mech == GSS_C_NO_OID) {
		/* Process an initial token or request for NegHints. */
		if (src_name != NULL)
			*src_name = GSS_C_NO_NAME;
		if (mech_type != NULL)
			*mech_type = GSS_C_NO_OID;
		if (time_rec != NULL)
			*time_rec = 0;
		if (ret_flags != NULL)
			*ret_flags = 0;
		if (delegated_cred_handle != NULL)
			*delegated_cred_handle = GSS_C_NO_CREDENTIAL;
		if (input_token->length == 0) {
			/* Zero-length token: emit a NegHints tokenInit. */
			ret = acc_ctx_hints(minor_status,
					    context_handle, spcred,
					    &mic_out,
					    &negState,
					    &return_token);
			if (ret != GSS_S_COMPLETE)
				goto cleanup;
			sendTokenInit = 1;
			ret = GSS_S_CONTINUE_NEEDED;
		} else {
			/* Can set negState to REQUEST_MIC */
			ret = acc_ctx_new(minor_status, input_token,
					  context_handle, spcred,
					  &mechtok_in, &mic_in,
					  &negState, &return_token);
			if (ret != GSS_S_COMPLETE)
				goto cleanup;
			ret = GSS_S_CONTINUE_NEEDED;
		}
	} else {
		/* Process a response token.  Can set negState to
		 * ACCEPT_INCOMPLETE. */
		ret = acc_ctx_cont(minor_status, input_token,
				   context_handle, &mechtok_in,
				   &mic_in, &negState, &return_token);
		if (ret != GSS_S_COMPLETE)
			goto cleanup;
		ret = GSS_S_CONTINUE_NEEDED;
	}

	/* Step 2: invoke the negotiated mechanism's gss_accept_sec_context
	 * function. */
	sc = (spnego_gss_ctx_id_t)*context_handle;
	/*
	 * Handle mechtok_in and mic_in only if they are
	 * present in input_token.  If neither is present, whether
	 * this is an error depends on whether this is the first
	 * round-trip.  RET is set to a default value according to
	 * whether it is the first round-trip.
	 */
	if (negState != REQUEST_MIC && mechtok_in != GSS_C_NO_BUFFER) {
		ret = acc_ctx_call_acc(minor_status, sc, spcred,
				       mechtok_in, mech_type, &mechtok_out,
				       ret_flags, time_rec,
				       delegated_cred_handle,
				       &negState, &return_token);
	}

	/* Step 3: process or generate the MIC, if the negotiated mech is
	 * complete and supports MICs. */
	if (!HARD_ERROR(ret) && sc->mech_complete &&
	    (sc->ctx_flags & GSS_C_INTEG_FLAG)) {
		ret = handle_mic(minor_status, mic_in,
				 (mechtok_out.length != 0),
				 sc, &mic_out,
				 &negState, &return_token);
	}

cleanup:
	if (return_token == INIT_TOKEN_SEND && sendTokenInit) {
		/* NegHints path: wrap the hints into a tokenInit message. */
		assert(sc != NULL);
		tmpret = make_spnego_tokenInit_msg(sc, 1, mic_out, 0,
						   GSS_C_NO_BUFFER,
						   return_token, output_token);
		if (tmpret < 0)
			ret = GSS_S_FAILURE;
	} else if (return_token != NO_TOKEN_SEND &&
		   return_token != CHECK_MIC) {
		/* Normal (or error) tokenTarg response to the initiator. */
		tmpret = make_spnego_tokenTarg_msg(negState,
						   sc ? sc->internal_mech :
						   GSS_C_NO_OID,
						   &mechtok_out, mic_out,
						   return_token,
						   output_token);
		if (tmpret < 0)
			ret = GSS_S_FAILURE;
	}
	if (ret == GSS_S_COMPLETE) {
		/* Hand the caller the inner mech context and (optionally)
		 * the initiator name; the SPNEGO wrapper is released. */
		*context_handle = (gss_ctx_id_t)sc->ctx_handle;
		if (sc->internal_name != GSS_C_NO_NAME &&
		    src_name != NULL) {
			*src_name = sc->internal_name;
			sc->internal_name = GSS_C_NO_NAME;
		}
		release_spnego_ctx(&sc);
	} else if (ret != GSS_S_CONTINUE_NEEDED) {
		/* Hard error: tear everything down. */
		if (sc != NULL) {
			gss_delete_sec_context(&tmpmin, &sc->ctx_handle,
					       GSS_C_NO_BUFFER);
			release_spnego_ctx(&sc);
		}
		*context_handle = GSS_C_NO_CONTEXT;
	}
	/* Per-call scratch buffers; mechtok_in/mic_in/mic_out were
	 * heap-allocated by the acc_ctx_* helpers. */
	gss_release_buffer(&tmpmin, &mechtok_out);
	if (mechtok_in != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mechtok_in);
		free(mechtok_in);
	}
	if (mic_in != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mic_in);
		free(mic_in);
	}
	if (mic_out != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mic_out);
		free(mic_out);
	}
	return ret;
}
#endif /* LEAN_CLIENT */
/*ARGSUSED*/
OM_uint32 KRB5_CALLCONV
spnego_gss_display_status(
		OM_uint32 *minor_status,
		OM_uint32 status_value,
		int status_type,
		gss_OID mech_type,
		OM_uint32 *message_context,
		gss_buffer_t status_string)
{
	OM_uint32 maj = GSS_S_COMPLETE;
	int ret;

	dsyslog("Entering display_status\n");

	*message_context = 0;
	switch (status_value) {
	    case ERR_SPNEGO_NO_MECHS_AVAILABLE:
		/* CSTYLED */
		*status_string = make_err_msg(_("SPNEGO cannot find "
						"mechanisms to negotiate"));
		break;
	    case ERR_SPNEGO_NO_CREDS_ACQUIRED:
		/* CSTYLED */
		*status_string = make_err_msg(_("SPNEGO failed to acquire "
						"creds"));
		break;
	    case ERR_SPNEGO_NO_MECH_FROM_ACCEPTOR:
		/* CSTYLED */
		*status_string = make_err_msg(_("SPNEGO acceptor did not "
						"select a mechanism"));
		break;
	    case ERR_SPNEGO_NEGOTIATION_FAILED:
		/* CSTYLED */
		*status_string = make_err_msg(_("SPNEGO failed to negotiate a "
						"mechanism"));
		break;
	    case ERR_SPNEGO_NO_TOKEN_FROM_ACCEPTOR:
		/* CSTYLED */
		*status_string = make_err_msg(_("SPNEGO acceptor did not "
						"return a valid token"));
		break;
	    default:
		/* Not one of our minor codes; might be from a mech.  Call back
		 * to gss_display_status, but first check for recursion. */
		if (k5_getspecific(K5_KEY_GSS_SPNEGO_STATUS) != NULL) {
			/* Already inside gss_display_status from this thread.
			 * Perhaps we returned a com_err code like ENOMEM. */
			const char *err = error_message(status_value);
			*status_string = make_err_msg(err);
			break;
		}
		/* Set a non-null pointer value; doesn't matter which one. */
		ret = k5_setspecific(K5_KEY_GSS_SPNEGO_STATUS, &ret);
		if (ret != 0) {
			*minor_status = ret;
			maj = GSS_S_FAILURE;
			break;
		}
		maj = gss_display_status(minor_status, status_value,
					 status_type, mech_type,
					 message_context, status_string);
		/* This is unlikely to fail; not much we can do if it does. */
		(void)k5_setspecific(K5_KEY_GSS_SPNEGO_STATUS, NULL);
		break;
	}

	dsyslog("Leaving display_status\n");
	return maj;
}
/*ARGSUSED*/
/* Names are mech-independent objects; delegate to the mechglue. */
OM_uint32 KRB5_CALLCONV
spnego_gss_import_name(
		    OM_uint32 *minor_status,
		    gss_buffer_t input_name_buffer,
		    gss_OID input_name_type,
		    gss_name_t *output_name)
{
	OM_uint32 major;

	dsyslog("Entering import_name\n");
	major = gss_import_name(minor_status, input_name_buffer,
				input_name_type, output_name);
	dsyslog("Leaving import_name\n");
	return (major);
}
/*ARGSUSED*/
/* Release a mechglue name object on behalf of the SPNEGO layer. */
OM_uint32 KRB5_CALLCONV
spnego_gss_release_name(
			OM_uint32 *minor_status,
			gss_name_t *input_name)
{
	OM_uint32 major;

	dsyslog("Entering release_name\n");
	major = gss_release_name(minor_status, input_name);
	dsyslog("Leaving release_name\n");
	return (major);
}
/*ARGSUSED*/
/* Duplicate a mechglue name object; no SPNEGO-specific state involved. */
OM_uint32 KRB5_CALLCONV
spnego_gss_duplicate_name(
			OM_uint32 *minor_status,
			const gss_name_t input_name,
			gss_name_t *output_name)
{
	OM_uint32 major;

	dsyslog("Entering duplicate_name\n");
	major = gss_duplicate_name(minor_status, input_name, output_name);
	dsyslog("Leaving duplicate_name\n");
	return (major);
}
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_cred(
			OM_uint32 *minor_status,
			gss_cred_id_t cred_handle,
			gss_name_t *name,
			OM_uint32 *lifetime,
			int *cred_usage,
			gss_OID_set *mechanisms)
{
	OM_uint32 status;
	spnego_gss_cred_id_t spcred = NULL;
	gss_cred_id_t creds = GSS_C_NO_CREDENTIAL;
	OM_uint32 tmp_minor_status;
	OM_uint32 initiator_lifetime, acceptor_lifetime;

	dsyslog("Entering inquire_cred\n");

	/*
	 * To avoid infinite recursion, if GSS_C_NO_CREDENTIAL is
	 * supplied we call gss_inquire_cred_by_mech() on the
	 * first non-SPNEGO mechanism.
	 */
	spcred = (spnego_gss_cred_id_t)cred_handle;
	if (spcred == NULL) {
		/* NOTE(review): this branch stores through *mechanisms and
		 * *cred_usage unconditionally — presumably the mechglue
		 * guarantees both are non-null; confirm against callers. */
		status = get_available_mechs(minor_status,
					GSS_C_NO_NAME,
					GSS_C_BOTH,
					GSS_C_NO_CRED_STORE,
					&creds,
					mechanisms);
		if (status != GSS_S_COMPLETE) {
			dsyslog("Leaving inquire_cred\n");
			return (status);
		}

		if ((*mechanisms)->count == 0) {
			gss_release_cred(&tmp_minor_status, &creds);
			gss_release_oid_set(&tmp_minor_status, mechanisms);
			dsyslog("Leaving inquire_cred\n");
			return (GSS_S_DEFECTIVE_CREDENTIAL);
		}

		assert((*mechanisms)->elements != NULL);

		/* Query the first available non-SPNEGO mechanism. */
		status = gss_inquire_cred_by_mech(minor_status,
						creds,
						&(*mechanisms)->elements[0],
						name,
						&initiator_lifetime,
						&acceptor_lifetime,
						cred_usage);
		if (status != GSS_S_COMPLETE) {
			gss_release_cred(&tmp_minor_status, &creds);
			dsyslog("Leaving inquire_cred\n");
			return (status);
		}

		/* Report the lifetime matching the credential's usage. */
		if (lifetime != NULL)
			*lifetime = (*cred_usage == GSS_C_ACCEPT) ?
				acceptor_lifetime : initiator_lifetime;

		gss_release_cred(&tmp_minor_status, &creds);
	} else {
		/* Real credential supplied: unwrap and delegate. */
		status = gss_inquire_cred(minor_status, spcred->mcred,
					  name, lifetime,
					  cred_usage, mechanisms);
	}

	dsyslog("Leaving inquire_cred\n");
	return (status);
}
/*ARGSUSED*/
/* Compare two mechglue name objects; straight pass-through. */
OM_uint32 KRB5_CALLCONV
spnego_gss_compare_name(
			OM_uint32 *minor_status,
			const gss_name_t name1,
			const gss_name_t name2,
			int *name_equal)
{
	OM_uint32 major;

	dsyslog("Entering compare_name\n");
	major = gss_compare_name(minor_status, name1, name2, name_equal);
	dsyslog("Leaving compare_name\n");
	return (major);
}
/*ARGSUSED*/
/* Render a mechglue name as text; straight pass-through. */
OM_uint32 KRB5_CALLCONV
spnego_gss_display_name(
			OM_uint32 *minor_status,
			gss_name_t input_name,
			gss_buffer_t output_name_buffer,
			gss_OID *output_name_type)
{
	OM_uint32 major;

	dsyslog("Entering display_name\n");
	major = gss_display_name(minor_status, input_name,
				 output_name_buffer, output_name_type);
	dsyslog("Leaving display_name\n");
	return (major);
}
/*ARGSUSED*/
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_names_for_mech(
			OM_uint32	*minor_status,
			gss_OID		mechanism,
			gss_OID_set	*name_types)
{
	OM_uint32 major, minor;
	gss_OID supported[4];
	int i;

	dsyslog("Entering inquire_names_for_mech\n");
	/*
	 * We only know how to handle our own mechanism.
	 */
	if ((mechanism != GSS_C_NULL_OID) &&
	    !g_OID_equal(gss_mech_spnego, mechanism)) {
		*minor_status = 0;
		return (GSS_S_FAILURE);
	}

	/* The name types SPNEGO advertises, in the order we add them. */
	supported[0] = (gss_OID) GSS_C_NT_USER_NAME;
	supported[1] = (gss_OID) GSS_C_NT_MACHINE_UID_NAME;
	supported[2] = (gss_OID) GSS_C_NT_STRING_UID_NAME;
	supported[3] = (gss_OID) GSS_C_NT_HOSTBASED_SERVICE;

	major = gss_create_empty_oid_set(minor_status, name_types);
	if (major == GSS_S_COMPLETE) {
		for (i = 0; i < 4 && major == GSS_S_COMPLETE; i++) {
			major = gss_add_oid_set_member(minor_status,
						       supported[i],
						       name_types);
		}
		/* On any failure, return nothing rather than a partial set. */
		if (major != GSS_S_COMPLETE)
			(void) gss_release_oid_set(&minor, name_types);
	}

	dsyslog("Leaving inquire_names_for_mech\n");
	return (major);
}
/* Per-message unwrap: SPNEGO adds nothing; defer to the selected mech. */
OM_uint32 KRB5_CALLCONV
spnego_gss_unwrap(
		OM_uint32 *minor_status,
		gss_ctx_id_t context_handle,
		gss_buffer_t input_message_buffer,
		gss_buffer_t output_message_buffer,
		int *conf_state,
		gss_qop_t *qop_state)
{
	return (gss_unwrap(minor_status, context_handle,
			   input_message_buffer, output_message_buffer,
			   conf_state, qop_state));
}
/* Per-message wrap: defer entirely to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap(
		OM_uint32 *minor_status,
		gss_ctx_id_t context_handle,
		int conf_req_flag,
		gss_qop_t qop_req,
		gss_buffer_t input_message_buffer,
		int *conf_state,
		gss_buffer_t output_message_buffer)
{
	return (gss_wrap(minor_status, context_handle, conf_req_flag,
			 qop_req, input_message_buffer, conf_state,
			 output_message_buffer));
}
/* Forward an asynchronous context token to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_process_context_token(
				OM_uint32	*minor_status,
				const gss_ctx_id_t context_handle,
				const gss_buffer_t token_buffer)
{
	return (gss_process_context_token(minor_status, context_handle,
					  token_buffer));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_delete_sec_context(
			OM_uint32 *minor_status,
			gss_ctx_id_t *context_handle,
			gss_buffer_t output_token)
{
	spnego_gss_ctx_id_t *ctx = (spnego_gss_ctx_id_t *)context_handle;

	*minor_status = 0;

	if (context_handle == NULL)
		return (GSS_S_FAILURE);
	if (*ctx == NULL)
		return (GSS_S_COMPLETE);

	/*
	 * A context still carrying the SPNEGO magic number is our own
	 * wrapper: delete the inner mech context, then free the wrapper
	 * locally.  Otherwise the handle already belongs to the selected
	 * mechanism and is deleted through the mechglue.
	 */
	if ((*ctx)->magic_num == SPNEGO_MAGIC_ID) {
		(void) gss_delete_sec_context(minor_status,
					      &(*ctx)->ctx_handle,
					      output_token);
		(void) release_spnego_ctx(ctx);
		return (GSS_S_COMPLETE);
	}

	return (gss_delete_sec_context(minor_status, context_handle,
				       output_token));
}
/* Remaining-lifetime query: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_context_time(
			OM_uint32	*minor_status,
			const gss_ctx_id_t context_handle,
			OM_uint32	*time_rec)
{
	return (gss_context_time(minor_status, context_handle, time_rec));
}
#ifndef LEAN_CLIENT
/* Serialize an established context; defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_export_sec_context(
			    OM_uint32	  *minor_status,
			    gss_ctx_id_t *context_handle,
			    gss_buffer_t interprocess_token)
{
	return (gss_export_sec_context(minor_status, context_handle,
				       interprocess_token));
}
/* Rehydrate a serialized context; defer to the mechglue. */
OM_uint32 KRB5_CALLCONV
spnego_gss_import_sec_context(
	OM_uint32		*minor_status,
	const gss_buffer_t	interprocess_token,
	gss_ctx_id_t		*context_handle)
{
	return (gss_import_sec_context(minor_status, interprocess_token,
				       context_handle));
}
#endif /* LEAN_CLIENT */
/* Context introspection: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_context(
			OM_uint32	*minor_status,
			const gss_ctx_id_t context_handle,
			gss_name_t	*src_name,
			gss_name_t	*targ_name,
			OM_uint32	*lifetime_rec,
			gss_OID		*mech_type,
			OM_uint32	*ctx_flags,
			int		*locally_initiated,
			int		*opened)
{
	return (gss_inquire_context(minor_status, context_handle,
				    src_name, targ_name, lifetime_rec,
				    mech_type, ctx_flags,
				    locally_initiated, opened));
}
/* Max-payload query for gss_wrap: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap_size_limit(
	OM_uint32	*minor_status,
	const gss_ctx_id_t context_handle,
	int		conf_req_flag,
	gss_qop_t	qop_req,
	OM_uint32	req_output_size,
	OM_uint32	*max_input_size)
{
	return (gss_wrap_size_limit(minor_status, context_handle,
				    conf_req_flag, qop_req,
				    req_output_size, max_input_size));
}
/* Per-message MIC generation: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_get_mic(
		OM_uint32 *minor_status,
		const gss_ctx_id_t context_handle,
		gss_qop_t qop_req,
		const gss_buffer_t message_buffer,
		gss_buffer_t message_token)
{
	return (gss_get_mic(minor_status, context_handle, qop_req,
			    message_buffer, message_token));
}
/* Per-message MIC verification: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_verify_mic(
		OM_uint32 *minor_status,
		const gss_ctx_id_t context_handle,
		const gss_buffer_t msg_buffer,
		const gss_buffer_t token_buffer,
		gss_qop_t *qop_state)
{
	return (gss_verify_mic(minor_status, context_handle,
			       msg_buffer, token_buffer, qop_state));
}
/* Extension query on an established context: straight pass-through. */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_sec_context_by_oid(
		OM_uint32 *minor_status,
		const gss_ctx_id_t context_handle,
		const gss_OID desired_object,
		gss_buffer_set_t *data_set)
{
	return (gss_inquire_sec_context_by_oid(minor_status, context_handle,
					       desired_object, data_set));
}
/* Extension query on a credential: unwrap the SPNEGO shell first. */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_cred_by_oid(
		OM_uint32 *minor_status,
		const gss_cred_id_t cred_handle,
		const gss_OID desired_object,
		gss_buffer_set_t *data_set)
{
	spnego_gss_cred_id_t spcred = (spnego_gss_cred_id_t)cred_handle;
	gss_cred_id_t mcred =
		(spcred == NULL) ? GSS_C_NO_CREDENTIAL : spcred->mcred;

	return (gss_inquire_cred_by_oid(minor_status, mcred,
					desired_object, data_set));
}
OM_uint32 KRB5_CALLCONV
spnego_gss_set_cred_option(
		OM_uint32 *minor_status,
		gss_cred_id_t *cred_handle,
		const gss_OID desired_object,
		const gss_buffer_t value)
{
	OM_uint32 ret;
	OM_uint32 tmp_minor_status;
	spnego_gss_cred_id_t spcred = (spnego_gss_cred_id_t)*cred_handle;
	gss_cred_id_t mcred;

	/* Apply the option to the wrapped mechglue credential. */
	mcred = (spcred == NULL) ? GSS_C_NO_CREDENTIAL : spcred->mcred;
	ret = gss_set_cred_option(minor_status, &mcred,
				  desired_object, value);
	if (ret != GSS_S_COMPLETE || spcred != NULL)
		return (ret);

	/*
	 * The mechanism allocated a new credential handle; wrap it up
	 * in an SPNEGO credential handle before giving it back.
	 */
	spcred = malloc(sizeof(spnego_gss_cred_id_rec));
	if (spcred == NULL) {
		gss_release_cred(&tmp_minor_status, &mcred);
		*minor_status = ENOMEM;
		return (GSS_S_FAILURE);
	}
	spcred->mcred = mcred;
	spcred->neg_mechs = GSS_C_NULL_OID_SET;
	*cred_handle = (gss_cred_id_t)spcred;
	return (ret);
}
/* Context option extension: straight pass-through to the mechglue. */
OM_uint32 KRB5_CALLCONV
spnego_gss_set_sec_context_option(
		OM_uint32 *minor_status,
		gss_ctx_id_t *context_handle,
		const gss_OID desired_object,
		const gss_buffer_t value)
{
	return (gss_set_sec_context_option(minor_status, context_handle,
					   desired_object, value));
}
/* AEAD wrap: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap_aead(OM_uint32 *minor_status,
		     gss_ctx_id_t context_handle,
		     int conf_req_flag,
		     gss_qop_t qop_req,
		     gss_buffer_t input_assoc_buffer,
		     gss_buffer_t input_payload_buffer,
		     int *conf_state,
		     gss_buffer_t output_message_buffer)
{
	return (gss_wrap_aead(minor_status, context_handle, conf_req_flag,
			      qop_req, input_assoc_buffer,
			      input_payload_buffer, conf_state,
			      output_message_buffer));
}
/* AEAD unwrap: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_unwrap_aead(OM_uint32 *minor_status,
		       gss_ctx_id_t context_handle,
		       gss_buffer_t input_message_buffer,
		       gss_buffer_t input_assoc_buffer,
		       gss_buffer_t output_payload_buffer,
		       int *conf_state,
		       gss_qop_t *qop_state)
{
	return (gss_unwrap_aead(minor_status, context_handle,
				input_message_buffer, input_assoc_buffer,
				output_payload_buffer, conf_state,
				qop_state));
}
/* IOV wrap: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap_iov(OM_uint32 *minor_status,
		    gss_ctx_id_t context_handle,
		    int conf_req_flag,
		    gss_qop_t qop_req,
		    int *conf_state,
		    gss_iov_buffer_desc *iov,
		    int iov_count)
{
	return (gss_wrap_iov(minor_status, context_handle, conf_req_flag,
			     qop_req, conf_state, iov, iov_count));
}
/* IOV unwrap: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_unwrap_iov(OM_uint32 *minor_status,
		      gss_ctx_id_t context_handle,
		      int *conf_state,
		      gss_qop_t *qop_state,
		      gss_iov_buffer_desc *iov,
		      int iov_count)
{
	return (gss_unwrap_iov(minor_status, context_handle, conf_state,
			       qop_state, iov, iov_count));
}
/* IOV length computation: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap_iov_length(OM_uint32 *minor_status,
			   gss_ctx_id_t context_handle,
			   int conf_req_flag,
			   gss_qop_t qop_req,
			   int *conf_state,
			   gss_iov_buffer_desc *iov,
			   int iov_count)
{
	return (gss_wrap_iov_length(minor_status, context_handle,
				    conf_req_flag, qop_req, conf_state,
				    iov, iov_count));
}
/* Post-establishment token completion: straight pass-through. */
OM_uint32 KRB5_CALLCONV
spnego_gss_complete_auth_token(
		OM_uint32 *minor_status,
		const gss_ctx_id_t context_handle,
		gss_buffer_t input_message_buffer)
{
	return (gss_complete_auth_token(minor_status, context_handle,
					input_message_buffer));
}
/*
 * Acquire an S4U2Self (impersonation) credential through the mechglue
 * and wrap the result in an SPNEGO credential record.  The output
 * credential handle is written only on success.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_acquire_cred_impersonate_name(OM_uint32 *minor_status,
					 const gss_cred_id_t impersonator_cred_handle,
					 const gss_name_t desired_name,
					 OM_uint32 time_req,
					 gss_OID_set desired_mechs,
					 gss_cred_usage_t cred_usage,
					 gss_cred_id_t *output_cred_handle,
					 gss_OID_set *actual_mechs,
					 OM_uint32 *time_rec)
{
	OM_uint32 status, tmpmin;
	gss_OID_set amechs = GSS_C_NULL_OID_SET;
	spnego_gss_cred_id_t imp_spcred = NULL, out_spcred = NULL;
	gss_cred_id_t imp_mcred, out_mcred = GSS_C_NO_CREDENTIAL;

	dsyslog("Entering spnego_gss_acquire_cred_impersonate_name\n");

	if (actual_mechs)
		*actual_mechs = NULL;

	if (time_rec)
		*time_rec = 0;

	/* Unwrap the impersonator credential to the mechglue handle. */
	imp_spcred = (spnego_gss_cred_id_t)impersonator_cred_handle;
	imp_mcred = imp_spcred ? imp_spcred->mcred : GSS_C_NO_CREDENTIAL;

	if (desired_mechs == GSS_C_NO_OID_SET) {
		/* Default to the mechs the impersonator cred supports. */
		status = gss_inquire_cred(minor_status, imp_mcred, NULL, NULL,
					  NULL, &amechs);
		if (status != GSS_S_COMPLETE)
			return status;
		desired_mechs = amechs;
	}

	status = gss_acquire_cred_impersonate_name(minor_status, imp_mcred,
						   desired_name, time_req,
						   desired_mechs, cred_usage,
						   &out_mcred, actual_mechs,
						   time_rec);

	/* Use a scratch minor here so a release failure cannot clobber
	 * the minor code from the acquisition above. */
	if (amechs != GSS_C_NULL_OID_SET)
		(void) gss_release_oid_set(&tmpmin, &amechs);

	/* Bug fix: only wrap the mechglue credential on success.  On
	 * failure out_mcred is not a valid handle, and the previous code
	 * stored it (and a wrapper) into *output_cred_handle anyway. */
	if (status != GSS_S_COMPLETE)
		return (status);

	out_spcred = malloc(sizeof(spnego_gss_cred_id_rec));
	if (out_spcred == NULL) {
		gss_release_cred(minor_status, &out_mcred);
		*minor_status = ENOMEM;
		return (GSS_S_FAILURE);
	}
	out_spcred->mcred = out_mcred;
	out_spcred->neg_mechs = GSS_C_NULL_OID_SET;
	*output_cred_handle = (gss_cred_id_t)out_spcred;

	dsyslog("Leaving spnego_gss_acquire_cred_impersonate_name\n");
	return (status);
}
OM_uint32 KRB5_CALLCONV
spnego_gss_acquire_cred_with_password(OM_uint32 *minor_status,
				      const gss_name_t desired_name,
				      const gss_buffer_t password,
				      OM_uint32 time_req,
				      const gss_OID_set desired_mechs,
				      gss_cred_usage_t cred_usage,
				      gss_cred_id_t *output_cred_handle,
				      gss_OID_set *actual_mechs,
				      OM_uint32 *time_rec)
{
	OM_uint32 status, tmpmin;
	gss_OID_set amechs = GSS_C_NULL_OID_SET;
	gss_cred_id_t mcred = NULL;
	spnego_gss_cred_id_t spcred = NULL;

	dsyslog("Entering spnego_gss_acquire_cred_with_password\n");

	if (actual_mechs)
		*actual_mechs = NULL;

	if (time_rec)
		*time_rec = 0;

	/* Acquire over every available non-SPNEGO mech; note that the
	 * caller-supplied desired_mechs is not consulted here. */
	status = get_available_mechs(minor_status, desired_name,
				     cred_usage, GSS_C_NO_CRED_STORE,
				     NULL, &amechs);
	if (status != GSS_S_COMPLETE)
		goto cleanup;

	status = gss_acquire_cred_with_password(minor_status, desired_name,
						password, time_req, amechs,
						cred_usage, &mcred,
						actual_mechs, time_rec);
	if (status != GSS_S_COMPLETE)
		goto cleanup;

	/* Wrap the mechglue credential in an SPNEGO credential record. */
	spcred = malloc(sizeof(spnego_gss_cred_id_rec));
	if (spcred == NULL) {
		*minor_status = ENOMEM;
		status = GSS_S_FAILURE;
		goto cleanup;
	}
	spcred->neg_mechs = GSS_C_NULL_OID_SET;
	spcred->mcred = mcred;
	/* Ownership transferred to spcred; don't release mcred below. */
	mcred = GSS_C_NO_CREDENTIAL;
	*output_cred_handle = (gss_cred_id_t)spcred;

cleanup:
	(void) gss_release_oid_set(&tmpmin, &amechs);
	(void) gss_release_cred(&tmpmin, &mcred);

	dsyslog("Leaving spnego_gss_acquire_cred_with_password\n");
	return (status);
}
/* Render a name in a requested name-type form: straight pass-through. */
OM_uint32 KRB5_CALLCONV
spnego_gss_display_name_ext(OM_uint32 *minor_status,
			    gss_name_t name,
			    gss_OID display_as_name_type,
			    gss_buffer_t display_name)
{
	return (gss_display_name_ext(minor_status, name,
				     display_as_name_type, display_name));
}
/* Naming-extensions introspection: straight pass-through. */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_name(OM_uint32 *minor_status,
			gss_name_t name,
			int *name_is_MN,
			gss_OID *MN_mech,
			gss_buffer_set_t *attrs)
{
	return (gss_inquire_name(minor_status, name, name_is_MN,
				 MN_mech, attrs));
}
/* Naming-extensions attribute lookup: straight pass-through. */
OM_uint32 KRB5_CALLCONV
spnego_gss_get_name_attribute(OM_uint32 *minor_status,
			      gss_name_t name,
			      gss_buffer_t attr,
			      int *authenticated,
			      int *complete,
			      gss_buffer_t value,
			      gss_buffer_t display_value,
			      int *more)
{
	return (gss_get_name_attribute(minor_status, name, attr,
				       authenticated, complete, value,
				       display_value, more));
}
/* Naming-extensions attribute update: straight pass-through. */
OM_uint32 KRB5_CALLCONV
spnego_gss_set_name_attribute(OM_uint32 *minor_status,
			      gss_name_t name,
			      int complete,
			      gss_buffer_t attr,
			      gss_buffer_t value)
{
	return (gss_set_name_attribute(minor_status, name, complete,
				       attr, value));
}
/* Naming-extensions attribute removal: straight pass-through. */
OM_uint32 KRB5_CALLCONV
spnego_gss_delete_name_attribute(OM_uint32 *minor_status,
				 gss_name_t name,
				 gss_buffer_t attr)
{
	return (gss_delete_name_attribute(minor_status, name, attr));
}
/* Composite-name export: straight pass-through. */
OM_uint32 KRB5_CALLCONV
spnego_gss_export_name_composite(OM_uint32 *minor_status,
				 gss_name_t name,
				 gss_buffer_t exp_composite_name)
{
	return (gss_export_name_composite(minor_status, name,
					  exp_composite_name));
}
/* Map a name to a mech-specific object: straight pass-through. */
OM_uint32 KRB5_CALLCONV
spnego_gss_map_name_to_any(OM_uint32 *minor_status,
			   gss_name_t name,
			   int authenticated,
			   gss_buffer_t type_id,
			   gss_any_t *output)
{
	return (gss_map_name_to_any(minor_status, name, authenticated,
				    type_id, output));
}
/* Release a mapping from gss_map_name_to_any: straight pass-through. */
OM_uint32 KRB5_CALLCONV
spnego_gss_release_any_name_mapping(OM_uint32 *minor_status,
				    gss_name_t name,
				    gss_buffer_t type_id,
				    gss_any_t *input)
{
	return (gss_release_any_name_mapping(minor_status, name,
					     type_id, input));
}
/* GSS PRF (RFC 4401): defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_pseudo_random(OM_uint32 *minor_status,
			 gss_ctx_id_t context,
			 int prf_key,
			 const gss_buffer_t prf_in,
			 ssize_t desired_output_len,
			 gss_buffer_t prf_out)
{
	return (gss_pseudo_random(minor_status, context, prf_key,
				  prf_in, desired_output_len, prf_out));
}
/*
 * Record the set of mechanisms the caller is willing to negotiate for
 * this credential; consulted later by get_negotiable_mechs().
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_set_neg_mechs(OM_uint32 *minor_status,
			 gss_cred_id_t cred_handle,
			 const gss_OID_set mech_list)
{
	OM_uint32 ret;
	spnego_gss_cred_id_t spcred = (spnego_gss_cred_id_t)cred_handle;

	/* Bug fix: guard against GSS_C_NO_CREDENTIAL — the previous code
	 * dereferenced spcred unconditionally and crashed on a null
	 * handle.  There is no SPNEGO record to store the list in. */
	if (spcred == NULL) {
		*minor_status = 0;
		return (GSS_S_CALL_INACCESSIBLE_READ);
	}

	/* Replace any previously stored list with a copy of mech_list. */
	gss_release_oid_set(minor_status, &spcred->neg_mechs);
	ret = generic_gss_copy_oid_set(minor_status, mech_list,
				       &spcred->neg_mechs);
	return (ret);
}
#define SPNEGO_SASL_NAME "SPNEGO"
#define SPNEGO_SASL_NAME_LEN (sizeof(SPNEGO_SASL_NAME) - 1)
/* Map the SASL mechanism name "SPNEGO" to the SPNEGO mech OID. */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_mech_for_saslname(OM_uint32 *minor_status,
				     const gss_buffer_t sasl_mech_name,
				     gss_OID *mech_type)
{
	/* Anything other than the exact literal "SPNEGO" is not ours. */
	if (sasl_mech_name->length != SPNEGO_SASL_NAME_LEN ||
	    memcmp(sasl_mech_name->value, SPNEGO_SASL_NAME,
		   SPNEGO_SASL_NAME_LEN) != 0)
		return (GSS_S_BAD_MECH);

	if (mech_type != NULL)
		*mech_type = (gss_OID)gss_mech_spnego;
	return (GSS_S_COMPLETE);
}
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_saslname_for_mech(OM_uint32 *minor_status,
				     const gss_OID desired_mech,
				     gss_buffer_t sasl_mech_name,
				     gss_buffer_t mech_name,
				     gss_buffer_t mech_description)
{
	*minor_status = 0;

	/* Only answer for the SPNEGO mech OID itself. */
	if (!g_OID_equal(desired_mech, gss_mech_spnego))
		return (GSS_S_BAD_MECH);

	/* NOTE(review): if a later g_make_string_buffer() fails, the
	 * buffers filled by the earlier calls are not released on the
	 * fail path — presumably callers free them regardless of the
	 * return code; confirm, otherwise this path leaks. */
	if (!g_make_string_buffer(SPNEGO_SASL_NAME, sasl_mech_name) ||
	    !g_make_string_buffer("spnego", mech_name) ||
	    !g_make_string_buffer("Simple and Protected GSS-API "
				  "Negotiation Mechanism", mech_description))
		goto fail;

	return (GSS_S_COMPLETE);

fail:
	*minor_status = ENOMEM;
	return (GSS_S_FAILURE);
}
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_attrs_for_mech(OM_uint32 *minor_status,
				  gss_const_OID mech,
				  gss_OID_set *mech_attrs,
				  gss_OID_set *known_mech_attrs)
{
	OM_uint32 major, tmpMinor;
	gss_const_OID supported[2];
	int i;

	/* known_mech_attrs is handled by mechglue */
	*minor_status = 0;
	if (mech_attrs == NULL)
		return (GSS_S_COMPLETE);

	major = gss_create_empty_oid_set(minor_status, mech_attrs);
	if (GSS_ERROR(major))
		goto cleanup;

	/* SPNEGO is a negotiation mechanism with framed initial tokens. */
	supported[0] = GSS_C_MA_MECH_NEGO;
	supported[1] = GSS_C_MA_ITOK_FRAMED;
	for (i = 0; i < 2; i++) {
		major = gss_add_oid_set_member(minor_status,
					       (gss_OID)supported[i],
					       mech_attrs);
		if (GSS_ERROR(major))
			goto cleanup;
	}

cleanup:
	if (GSS_ERROR(major))
		gss_release_oid_set(&tmpMinor, mech_attrs);

	return (major);
}
/* Serialize the wrapped mechglue credential; the SPNEGO shell itself
 * carries no state worth exporting. */
OM_uint32 KRB5_CALLCONV
spnego_gss_export_cred(OM_uint32 *minor_status,
		       gss_cred_id_t cred_handle,
		       gss_buffer_t token)
{
	spnego_gss_cred_id_t spcred = (spnego_gss_cred_id_t)cred_handle;

	return (gss_export_cred(minor_status, spcred->mcred, token));
}
/* Rehydrate a mechglue credential and wrap it in an SPNEGO record. */
OM_uint32 KRB5_CALLCONV
spnego_gss_import_cred(OM_uint32 *minor_status,
		       gss_buffer_t token,
		       gss_cred_id_t *cred_handle)
{
	OM_uint32 major;
	spnego_gss_cred_id_t spcred;
	gss_cred_id_t mcred;

	major = gss_import_cred(minor_status, token, &mcred);
	if (GSS_ERROR(major))
		return (major);

	spcred = malloc(sizeof(*spcred));
	if (spcred == NULL) {
		gss_release_cred(minor_status, &mcred);
		*minor_status = ENOMEM;
		return (GSS_S_FAILURE);
	}
	spcred->mcred = mcred;
	spcred->neg_mechs = GSS_C_NULL_OID_SET;
	*cred_handle = (gss_cred_id_t)spcred;
	return (major);
}
/* IOV MIC generation: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
		       gss_qop_t qop_req, gss_iov_buffer_desc *iov,
		       int iov_count)
{
	return (gss_get_mic_iov(minor_status, context_handle, qop_req,
				iov, iov_count));
}
/* IOV MIC verification: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_verify_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
			  gss_qop_t *qop_state, gss_iov_buffer_desc *iov,
			  int iov_count)
{
	return (gss_verify_mic_iov(minor_status, context_handle, qop_state,
				   iov, iov_count));
}
/* IOV MIC length computation: defer to the selected mechanism. */
OM_uint32 KRB5_CALLCONV
spnego_gss_get_mic_iov_length(OM_uint32 *minor_status,
			      gss_ctx_id_t context_handle, gss_qop_t qop_req,
			      gss_iov_buffer_desc *iov, int iov_count)
{
	return (gss_get_mic_iov_length(minor_status, context_handle,
				       qop_req, iov, iov_count));
}
/*
* We will release everything but the ctx_handle so that it
* can be passed back to init/accept context. This routine should
* not be called until after the ctx_handle memory is assigned to
* the supplied context handle from init/accept context.
*/
static void
release_spnego_ctx(spnego_gss_ctx_id_t *ctx)
{
	spnego_gss_ctx_id_t context = *ctx;
	OM_uint32 minor_stat;

	if (context == NULL)
		return;

	/* Free owned sub-objects; ctx_handle is deliberately left alone
	 * (it may have been handed to the caller — see header comment). */
	(void) gss_release_buffer(&minor_stat, &context->DER_mechTypes);
	(void) gss_release_oid_set(&minor_stat, &context->mech_set);
	(void) gss_release_name(&minor_stat, &context->internal_name);

	/* free(NULL) is a no-op, so no guard is needed. */
	free(context->optionStr);
	context->optionStr = NULL;

	free(context);
	*ctx = NULL;
}
/*
* Can't use gss_indicate_mechs by itself to get available mechs for
* SPNEGO because it will also return the SPNEGO mech and we do not
* want to consider SPNEGO as an available security mech for
* negotiation. For this reason, get_available_mechs will return
* all available mechs except SPNEGO.
*
* If a ptr to a creds list is given, this function will attempt
* to acquire creds for the creds given and trim the list of
* returned mechanisms to only those for which creds are valid.
*
*/
static OM_uint32
get_available_mechs(OM_uint32 *minor_status,
	gss_name_t name, gss_cred_usage_t usage,
	gss_const_key_value_set_t cred_store,
	gss_cred_id_t *creds, gss_OID_set *rmechs)
{
	unsigned int	i;
	int		found = 0;
	OM_uint32 major_status = GSS_S_COMPLETE, tmpmin;
	gss_OID_set mechs, goodmechs;

	major_status = gss_indicate_mechs(minor_status, &mechs);

	if (major_status != GSS_S_COMPLETE) {
		return (major_status);
	}

	major_status = gss_create_empty_oid_set(minor_status, rmechs);

	if (major_status != GSS_S_COMPLETE) {
		(void) gss_release_oid_set(minor_status, &mechs);
		return (major_status);
	}

	/* Copy every mech except SPNEGO itself into *rmechs. */
	for (i = 0; i < mechs->count && major_status == GSS_S_COMPLETE; i++) {
		if ((mechs->elements[i].length
		    != spnego_mechanism.mech_type.length) ||
		    memcmp(mechs->elements[i].elements,
			spnego_mechanism.mech_type.elements,
			spnego_mechanism.mech_type.length)) {

			major_status = gss_add_oid_set_member(minor_status,
							      &mechs->elements[i],
							      rmechs);
			if (major_status == GSS_S_COMPLETE)
				found++;
		}
	}

	/*
	 * If the caller wanted a list of creds returned,
	 * trim the list of mechanisms down to only those
	 * for which the creds are valid.
	 */
	if (found > 0 && major_status == GSS_S_COMPLETE && creds != NULL) {
		major_status = gss_acquire_cred_from(minor_status, name,
						     GSS_C_INDEFINITE,
						     *rmechs, usage,
						     cred_store, creds,
						     &goodmechs, NULL);

		/*
		 * Drop the old list in favor of the new
		 * "trimmed" list.
		 */
		(void) gss_release_oid_set(&tmpmin, rmechs);
		if (major_status == GSS_S_COMPLETE) {
			(void) gssint_copy_oid_set(&tmpmin,
					goodmechs, rmechs);
			(void) gss_release_oid_set(&tmpmin, &goodmechs);
		}
	}

	(void) gss_release_oid_set(&tmpmin, &mechs);
	/* No usable mechs (or a failure above) maps to a single SPNEGO
	 * error so callers report a consistent message. */
	if (found == 0 || major_status != GSS_S_COMPLETE) {
		*minor_status = ERR_SPNEGO_NO_MECHS_AVAILABLE;
		map_errcode(minor_status);
		if (major_status == GSS_S_COMPLETE)
			major_status = GSS_S_FAILURE;
	}

	return (major_status);
}
/*
* Return a list of mechanisms we are willing to negotiate for a credential,
* taking into account the mech set provided with gss_set_neg_mechs if it
* exists.
*/
/*
 * Produce in *rmechs the list of mechanisms we are willing to negotiate
 * for the given SPNEGO credential.  With a default cred (spcred == NULL)
 * this is every available mech except SPNEGO; otherwise it is the cred's
 * mech list, optionally intersected with the set from gss_set_neg_mechs.
 * Caller owns *rmechs on success.
 */
static OM_uint32
get_negotiable_mechs(OM_uint32 *minor_status, spnego_gss_cred_id_t spcred,
gss_cred_usage_t usage, gss_OID_set *rmechs)
{
OM_uint32 ret, tmpmin;
gss_cred_id_t creds = GSS_C_NO_CREDENTIAL, *credptr;
gss_OID_set cred_mechs = GSS_C_NULL_OID_SET;
gss_OID_set intersect_mechs = GSS_C_NULL_OID_SET;
unsigned int i;
int present;
if (spcred == NULL) {
/*
 * The default credentials were supplied. Return a list of all
 * available mechs except SPNEGO. When initiating, trim this
 * list to mechs we can acquire credentials for.
 */
credptr = (usage == GSS_C_INITIATE) ? &creds : NULL;
ret = get_available_mechs(minor_status, GSS_C_NO_NAME, usage,
GSS_C_NO_CRED_STORE, credptr,
rmechs);
/* The acquired creds were only needed for trimming; drop them. */
gss_release_cred(&tmpmin, &creds);
return (ret);
}
/* Get the list of mechs in the mechglue cred. */
ret = gss_inquire_cred(minor_status, spcred->mcred, NULL, NULL, NULL,
&cred_mechs);
if (ret != GSS_S_COMPLETE)
return (ret);
if (spcred->neg_mechs == GSS_C_NULL_OID_SET) {
/* gss_set_neg_mechs was never called; return cred_mechs. */
/* Ownership of cred_mechs transfers to the caller here. */
*rmechs = cred_mechs;
*minor_status = 0;
return (GSS_S_COMPLETE);
}
/* Compute the intersection of cred_mechs and spcred->neg_mechs,
 * preserving the order in spcred->neg_mechs. */
ret = gss_create_empty_oid_set(minor_status, &intersect_mechs);
if (ret != GSS_S_COMPLETE) {
gss_release_oid_set(&tmpmin, &cred_mechs);
return (ret);
}
for (i = 0; i < spcred->neg_mechs->count; i++) {
gss_test_oid_set_member(&tmpmin,
&spcred->neg_mechs->elements[i],
cred_mechs, &present);
if (!present)
continue;
ret = gss_add_oid_set_member(minor_status,
&spcred->neg_mechs->elements[i],
&intersect_mechs);
if (ret != GSS_S_COMPLETE)
break;
}
gss_release_oid_set(&tmpmin, &cred_mechs);
/* An empty intersection means there is nothing we can negotiate. */
if (intersect_mechs->count == 0 || ret != GSS_S_COMPLETE) {
gss_release_oid_set(&tmpmin, &intersect_mechs);
*minor_status = ERR_SPNEGO_NO_MECHS_AVAILABLE;
map_errcode(minor_status);
return (GSS_S_FAILURE);
}
*rmechs = intersect_mechs;
*minor_status = 0;
return (GSS_S_COMPLETE);
}
/* following are token creation and reading routines */
/*
* If buff_in is not pointing to a MECH_OID, then return NULL and do not
* advance the buffer, otherwise, decode the mech_oid from the buffer and
* place in gss_OID.
*/
/*
 * If *buff_in points at a DER mechanism OID (0x06 tag) within the next
 * LENGTH bytes, decode it, advance *buff_in past it, and return a newly
 * allocated copy of the OID (caller frees via generic_gss_release_oid).
 * Returns NULL on any parse or allocation failure; in that case *buff_in
 * may already have been advanced past the tag and length octets.
 */
static gss_OID
get_mech_oid(OM_uint32 *minor_status, unsigned char **buff_in, size_t length)
{
	OM_uint32 status;
	gss_OID_desc toid;
	gss_OID mech_out = NULL;
	unsigned char *start, *end;

	/*
	 * We consume two octets (tag and length) before the bounds check
	 * below, so require at least two bytes up front.  The original
	 * "length < 1" guard allowed a one-byte buffer, making the read
	 * of the length octet a one-byte out-of-bounds access.
	 */
	if (length < 2 || **buff_in != MECH_OID)
		return (NULL);

	start = *buff_in;
	end = start + length;

	(*buff_in)++;
	toid.length = *(*buff_in)++;

	/* Make sure the OID body itself lies within the buffer. */
	if ((*buff_in + toid.length) > end)
		return (NULL);

	toid.elements = *buff_in;
	*buff_in += toid.length;

	status = generic_gss_copy_oid(minor_status, &toid, &mech_out);
	if (status != GSS_S_COMPLETE) {
		map_errcode(minor_status);
		mech_out = NULL;
	}
	return (mech_out);
}
/*
* der encode the given mechanism oid into buf_out, advancing the
* buffer pointer.
*/
/*
 * DER-encode MECH into *buf_out as 0x06 [len] [oid-bytes], advancing
 * *buf_out past the encoding.  Returns 0 on success, -1 if fewer than
 * mech->length + 2 bytes remain in the output buffer.
 */
static int
put_mech_oid(unsigned char **buf_out, gss_OID_const mech, unsigned int buflen)
{
	unsigned char *p;

	/* Need room for the tag byte, the length byte, and the OID body. */
	if (mech->length + 2 > buflen)
		return (-1);
	p = *buf_out;
	p[0] = MECH_OID;
	p[1] = (unsigned char)mech->length;
	memcpy(p + 2, mech->elements, mech->length);
	*buf_out = p + 2 + mech->length;
	return (0);
}
/*
* verify that buff_in points to an octet string, if it does not,
* return NULL and don't advance the pointer. If it is an octet string
* decode buff_in into a gss_buffer_t and return it, advancing the
* buffer pointer.
*/
/*
 * If *buff_in points at a DER OCTET STRING within buff_length bytes,
 * copy its contents into a freshly allocated gss_buffer and advance
 * *buff_in past the payload.  Returns NULL (without advancing past the
 * tag) when the tag does not match, or on allocation failure.  The
 * returned buffer's value is gssalloc-allocated; the struct is malloc'd.
 */
static gss_buffer_t
get_input_token(unsigned char **buff_in, unsigned int buff_length)
{
	gss_buffer_t token;
	unsigned int content_len;

	if (g_get_tag_and_length(buff_in, OCTET_STRING, buff_length,
				 &content_len) < 0)
		return (NULL);

	token = (gss_buffer_t)malloc(sizeof(gss_buffer_desc));
	if (token == NULL)
		return (NULL);

	token->length = content_len;
	token->value = NULL;
	if (content_len > 0) {
		token->value = gssalloc_malloc(content_len);
		if (token->value == NULL) {
			free(token);
			return (NULL);
		}
		memcpy(token->value, *buff_in, content_len);
	}
	*buff_in += content_len;
	return (token);
}
/*
* verify that the input token length is not 0. If it is, just return.
* If the token length is greater than 0, der encode as an octet string
* and place in buf_out, advancing buf_out.
*/
/*
 * DER-encode input_token into *buf_out as an OCTET STRING, advancing
 * *buf_out past the encoding.  A zero-length token emits nothing and
 * succeeds.  Returns 0 on success, nonzero on failure.
 */
static int
put_input_token(unsigned char **buf_out, gss_buffer_t input_token,
unsigned int buflen)
{
int ret;
/* if token length is 0, we do not want to send */
if (input_token->length == 0)
return (0);
if (input_token->length > buflen)
return (-1);
*(*buf_out)++ = OCTET_STRING;
/*
 * NOTE(review): the third argument is the token length, not the
 * remaining buffer space (buflen).  Presumably safe because the DER
 * encoding of a length N always fits within N bytes for N >= 1, but
 * confirm against gssint_put_der_length's contract.
 */
if ((ret = gssint_put_der_length(input_token->length, buf_out,
input_token->length)))
return (ret);
TWRITE_STR(*buf_out, input_token->value, input_token->length);
return (0);
}
/*
* verify that buff_in points to a sequence of der encoding. The mech
* set is the only sequence of encoded object in the token, so if it is
* a sequence of encoding, decode the mechset into a gss_OID_set and
* return it, advancing the buffer pointer.
*/
/*
 * If *buff_in points at a DER SEQUENCE OF mechanism OIDs within
 * buff_length bytes, decode it into a newly created gss_OID_set and
 * advance *buff_in.  Returns NULL on a framing error; a partially
 * filled set may be returned if an inner OID fails to parse.
 */
static gss_OID_set
get_mech_set(OM_uint32 *minor_status, unsigned char **buff_in,
	     unsigned int buff_length)
{
	gss_OID_set returned_mechSet;
	OM_uint32 major_status;
	int length;
	unsigned int bytes;
	OM_uint32 set_length;
	unsigned char *start;
	int i;

	/*
	 * Check the tag only if at least one byte remains; the original
	 * dereferenced **buff_in unconditionally, reading out of bounds
	 * on an empty buffer.
	 */
	if (buff_length < 1 || **buff_in != SEQUENCE_OF)
		return (NULL);
	start = *buff_in;
	(*buff_in)++;

	length = gssint_get_der_length(buff_in, buff_length, &bytes);
	if (length < 0 || buff_length - bytes < (unsigned int)length)
		return NULL;

	major_status = gss_create_empty_oid_set(minor_status,
						&returned_mechSet);
	if (major_status != GSS_S_COMPLETE)
		return (NULL);

	/* Walk the sequence contents, decoding one OID per iteration. */
	for (set_length = 0, i = 0; set_length < (unsigned int)length; i++) {
		gss_OID_desc *temp = get_mech_oid(minor_status, buff_in,
			buff_length - (*buff_in - start));
		if (temp == NULL)
			break;
		major_status = gss_add_oid_set_member(minor_status,
						      temp, &returned_mechSet);
		if (major_status == GSS_S_COMPLETE) {
			/* Account for the tag and length octets too. */
			set_length += returned_mechSet->elements[i].length + 2;
		}
		/*
		 * gss_add_oid_set_member copies the OID, so release the
		 * temporary whether or not the add succeeded (the original
		 * leaked it on failure).
		 */
		if (generic_gss_release_oid(minor_status, &temp))
			map_errcode(minor_status);
	}
	return (returned_mechSet);
}
/*
* Encode mechSet into buf.
*/
/*
 * DER-encode mechSet into a freshly allocated buffer:
 * 0x30 [len] { 0x06 [len] [oid] }...  On success buf owns the encoding;
 * on failure (-1) buf->value may hold a partially written allocation
 * which the caller is expected to release.
 */
static int
put_mech_set(gss_OID_set mechSet, gss_buffer_t buf)
{
	unsigned char *ptr;
	unsigned int i;
	unsigned int tlen, ilen;

	tlen = ilen = 0;
	for (i = 0; i < mechSet->count; i++) {
		/*
		 * 0x06 [DER LEN] [OID]
		 */
		ilen += 1 +
			gssint_der_length_size(mechSet->elements[i].length) +
			mechSet->elements[i].length;
	}
	/*
	 * 0x30 [DER LEN]
	 */
	tlen = 1 + gssint_der_length_size(ilen) + ilen;
	ptr = gssalloc_malloc(tlen);
	if (ptr == NULL)
		return -1;

	buf->value = ptr;
	buf->length = tlen;
	/*
	 * Bytes left = total length minus bytes already written.  The
	 * original computed (buf->value - ptr), which is negative once
	 * ptr advances, so REMAIN overstated the available space and the
	 * bounds checks below could never fire.
	 */
#define REMAIN (buf->length - (unsigned int)(ptr - (unsigned char *)buf->value))
	*ptr++ = SEQUENCE_OF;
	if (gssint_put_der_length(ilen, &ptr, REMAIN) < 0)
		return -1;
	for (i = 0; i < mechSet->count; i++) {
		if (put_mech_oid(&ptr, &mechSet->elements[i], REMAIN) < 0) {
			return -1;
		}
	}
	return 0;
#undef REMAIN
}
/*
* Verify that buff_in is pointing to a BIT_STRING with the correct
* length and padding for the req_flags. If it is, decode req_flags
* and return them, otherwise, return NULL.
*/
/*
 * Decode the optional ReqFlags field ([1] BIT STRING) at *buff_in.
 * If the field is absent (wrong tag, or no bytes remain) this is not an
 * error: return 0 without touching *req_flags.  If present but
 * malformed, return GSS_S_DEFECTIVE_TOKEN.  On success *req_flags holds
 * the flag bits and *buff_in is advanced past the field.
 */
static OM_uint32
get_req_flags(unsigned char **buff_in, OM_uint32 bodysize,
	      OM_uint32 *req_flags)
{
	unsigned int len;

	/* Do not read the tag byte from an empty buffer; the field is
	 * optional, so "nothing left" simply means it is absent. */
	if (bodysize < 1 || **buff_in != (CONTEXT | 0x01))
		return (0);

	if (g_get_tag_and_length(buff_in, (CONTEXT | 0x01),
				 bodysize, &len) < 0)
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * We consume four octets below (BIT STRING tag, its length, the
	 * pad count, and one content octet); the original read them with
	 * no check that the element actually contained four octets.
	 */
	if (len < 4)
		return GSS_S_DEFECTIVE_TOKEN;

	if (*(*buff_in)++ != BIT_STRING)
		return GSS_S_DEFECTIVE_TOKEN;
	if (*(*buff_in)++ != BIT_STRING_LENGTH)
		return GSS_S_DEFECTIVE_TOKEN;
	if (*(*buff_in)++ != BIT_STRING_PADDING)
		return GSS_S_DEFECTIVE_TOKEN;
	*req_flags = (OM_uint32) (*(*buff_in)++ >> 1);
	return (0);
}
/*
 * Parse a NegTokenInit message from buf.  On success fills in:
 *   der_mechSet  - raw DER encoding of the mech list (gssalloc'd copy)
 *   mechSet      - decoded mech OID set
 *   req_flags    - optional ReqFlags bits (0 if absent)
 *   mechtok      - optional inner mech token (malloc'd, may stay NULL)
 *   mechListMIC  - optional MIC buffer (malloc'd, may stay NULL)
 * On failure some outputs may already be allocated; callers release them.
 */
static OM_uint32
get_negTokenInit(OM_uint32 *minor_status,
gss_buffer_t buf,
gss_buffer_t der_mechSet,
gss_OID_set *mechSet,
OM_uint32 *req_flags,
gss_buffer_t *mechtok,
gss_buffer_t *mechListMIC)
{
OM_uint32 err;
unsigned char *ptr, *bufstart;
unsigned int len;
gss_buffer_desc tmpbuf;
*minor_status = 0;
der_mechSet->length = 0;
der_mechSet->value = NULL;
*mechSet = GSS_C_NO_OID_SET;
*req_flags = 0;
*mechtok = *mechListMIC = GSS_C_NO_BUFFER;
ptr = bufstart = buf->value;
/* REMAIN below is used as a signed-safe size; reject absurd inputs. */
if ((buf->length - (ptr - bufstart)) > INT_MAX)
return GSS_S_FAILURE;
#define REMAIN (buf->length - (ptr - bufstart))
err = g_verify_token_header(gss_mech_spnego,
&len, &ptr, 0, REMAIN);
if (err) {
*minor_status = err;
map_errcode(minor_status);
return GSS_S_FAILURE;
}
*minor_status = g_verify_neg_token_init(&ptr, REMAIN);
if (*minor_status) {
map_errcode(minor_status);
return GSS_S_FAILURE;
}
/* alias into input_token */
tmpbuf.value = ptr;
tmpbuf.length = REMAIN;
*mechSet = get_mech_set(minor_status, &ptr, REMAIN);
if (*mechSet == NULL)
return GSS_S_FAILURE;
/* Keep a private copy of the raw DER mech list for later MIC checks. */
tmpbuf.length = ptr - (unsigned char *)tmpbuf.value;
der_mechSet->value = gssalloc_malloc(tmpbuf.length);
if (der_mechSet->value == NULL)
return GSS_S_FAILURE;
memcpy(der_mechSet->value, tmpbuf.value, tmpbuf.length);
der_mechSet->length = tmpbuf.length;
err = get_req_flags(&ptr, REMAIN, req_flags);
if (err != GSS_S_COMPLETE) {
return err;
}
/* [2] mechToken and [3] mechListMIC are both optional fields. */
if (g_get_tag_and_length(&ptr, (CONTEXT | 0x02),
REMAIN, &len) >= 0) {
*mechtok = get_input_token(&ptr, len);
if (*mechtok == GSS_C_NO_BUFFER) {
return GSS_S_FAILURE;
}
}
if (g_get_tag_and_length(&ptr, (CONTEXT | 0x03),
REMAIN, &len) >= 0) {
*mechListMIC = get_input_token(&ptr, len);
if (*mechListMIC == GSS_C_NO_BUFFER) {
return GSS_S_FAILURE;
}
}
return GSS_S_COMPLETE;
#undef REMAIN
}
/*
 * Parse a NegTokenResp (NegTokenTarg) message from buf.  All four inner
 * fields are optional; each "if (tag == ...)" stanza consumes one field
 * and reads the next tag byte, so the checks chain in DER order:
 *   [0] negState, [1] supportedMech, [2] responseToken, [3] mechListMIC.
 * Outputs are allocated for the caller to release; *negState defaults to
 * ACCEPT_DEFECTIVE_TOKEN.
 */
static OM_uint32
get_negTokenResp(OM_uint32 *minor_status,
unsigned char *buf, unsigned int buflen,
OM_uint32 *negState,
gss_OID *supportedMech,
gss_buffer_t *responseToken,
gss_buffer_t *mechListMIC)
{
unsigned char *ptr, *bufstart;
unsigned int len;
int tmplen;
unsigned int tag, bytes;
*negState = ACCEPT_DEFECTIVE_TOKEN;
*supportedMech = GSS_C_NO_OID;
*responseToken = *mechListMIC = GSS_C_NO_BUFFER;
ptr = bufstart = buf;
#define REMAIN (buflen - (ptr - bufstart))
if (g_get_tag_and_length(&ptr, (CONTEXT | 0x01), REMAIN, &len) < 0)
return GSS_S_DEFECTIVE_TOKEN;
/* NOTE(review): ptr advances past this byte even when it is not a
 * SEQUENCE tag; parsing then continues at the next octet. */
if (*ptr++ == SEQUENCE) {
tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
return GSS_S_DEFECTIVE_TOKEN;
}
if (REMAIN < 1)
tag = 0;
else
tag = *ptr++;
/* [0] negState: a 1-octet ENUMERATED value. */
if (tag == CONTEXT) {
tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
return GSS_S_DEFECTIVE_TOKEN;
if (g_get_tag_and_length(&ptr, ENUMERATED,
REMAIN, &len) < 0)
return GSS_S_DEFECTIVE_TOKEN;
if (len != ENUMERATION_LENGTH)
return GSS_S_DEFECTIVE_TOKEN;
if (REMAIN < 1)
return GSS_S_DEFECTIVE_TOKEN;
*negState = *ptr++;
if (REMAIN < 1)
tag = 0;
else
tag = *ptr++;
}
/* [1] supportedMech: the mech OID the acceptor chose. */
if (tag == (CONTEXT | 0x01)) {
tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
return GSS_S_DEFECTIVE_TOKEN;
*supportedMech = get_mech_oid(minor_status, &ptr, REMAIN);
if (*supportedMech == GSS_C_NO_OID)
return GSS_S_DEFECTIVE_TOKEN;
if (REMAIN < 1)
tag = 0;
else
tag = *ptr++;
}
/* [2] responseToken: the inner mechanism's token. */
if (tag == (CONTEXT | 0x02)) {
tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
return GSS_S_DEFECTIVE_TOKEN;
*responseToken = get_input_token(&ptr, REMAIN);
if (*responseToken == GSS_C_NO_BUFFER)
return GSS_S_DEFECTIVE_TOKEN;
if (REMAIN < 1)
tag = 0;
else
tag = *ptr++;
}
/* [3] mechListMIC. */
if (tag == (CONTEXT | 0x03)) {
tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
return GSS_S_DEFECTIVE_TOKEN;
*mechListMIC = get_input_token(&ptr, REMAIN);
if (*mechListMIC == GSS_C_NO_BUFFER)
return GSS_S_DEFECTIVE_TOKEN;
/* Handle Windows 2000 duplicate response token */
if (*responseToken &&
((*responseToken)->length == (*mechListMIC)->length) &&
!memcmp((*responseToken)->value, (*mechListMIC)->value,
(*responseToken)->length)) {
OM_uint32 tmpmin;
/* Drop the duplicated copy: free both the contents and the
 * malloc'd gss_buffer_desc wrapper from get_input_token(). */
gss_release_buffer(&tmpmin, *mechListMIC);
free(*mechListMIC);
*mechListMIC = NULL;
}
}
return GSS_S_COMPLETE;
#undef REMAIN
}
/*
* der encode the passed negResults as an ENUMERATED type and
* place it in buf_out, advancing the buffer.
*/
/*
 * DER-encode negResult as a 1-octet ENUMERATED value into *buf_out,
 * advancing the pointer.  Returns 0 on success, -1 if fewer than three
 * bytes remain.
 */
static int
put_negResult(unsigned char **buf_out, OM_uint32 negResult,
	      unsigned int buflen)
{
	unsigned char *out;

	if (buflen < 3)
		return (-1);
	out = *buf_out;
	out[0] = ENUMERATED;
	out[1] = ENUMERATION_LENGTH;
	out[2] = (unsigned char)negResult;
	*buf_out = out + 3;
	return (0);
}
/*
* This routine compares the received mechset to the mechset that
* this server can support. It looks sequentially through the mechset
* and the first one that matches what the server can support is
* chosen as the negotiated mechanism. If one is found, negResult
* is set to ACCEPT_INCOMPLETE if it's the first mech, REQUEST_MIC if
* it's not the first mech, otherwise we return NULL and negResult
* is set to REJECT. The returned pointer is an alias into
* received->elements and should not be freed.
*
* NOTE: There is currently no way to specify a preference order of
* mechanisms supported by the acceptor.
*/
/*
 * Pick the first mech in RECEIVED (client preference order) that also
 * appears in SUPPORTED.  *negResult becomes ACCEPT_INCOMPLETE when the
 * client's top choice won, REQUEST_MIC for any later choice, and REJECT
 * (with a NULL return) when nothing matches.  The returned pointer
 * aliases received->elements and must not be freed.  Microsoft clients
 * sending the "wrong" krb5 OID are treated as offering real krb5.
 */
static gss_OID
negotiate_mech(gss_OID_set supported, gss_OID_set received,
	       OM_uint32 *negResult)
{
	size_t r, s;
	gss_OID oid;

	for (r = 0; r < received->count; r++) {
		oid = &received->elements[r];
		/* Accept wrong mechanism OID from MS clients */
		if (g_OID_equal(oid, &gss_mech_krb5_wrong_oid))
			oid = (gss_OID)&gss_mech_krb5_oid;
		for (s = 0; s < supported->count; s++) {
			if (!g_OID_equal(oid, &supported->elements[s]))
				continue;
			*negResult = (r == 0) ? ACCEPT_INCOMPLETE :
				REQUEST_MIC;
			return &received->elements[r];
		}
	}
	*negResult = REJECT;
	return (NULL);
}
/*
* the next two routines make a token buffer suitable for
* spnego_gss_display_status. These currently take the string
* in name and place it in the token. Eventually, if
* spnego_gss_display_status returns valid error messages,
* these routines will be changes to return the error string.
*/
static spnego_token_t
make_spnego_token(const char *name)
{
return (spnego_token_t)strdup(name);
}
/*
 * Wrap NAME in a gss_buffer_desc whose value is a heap copy of the
 * string and whose length includes the terminating NUL.  A NULL name
 * yields an empty (0, NULL) buffer.
 */
static gss_buffer_desc
make_err_msg(const char *name)
{
	gss_buffer_desc buffer = { 0, NULL };

	if (name != NULL) {
		buffer.length = strlen(name) + 1;
		buffer.value = make_spnego_token(name);
	}
	return (buffer);
}
/*
* Create the client side spnego token passed back to gss_init_sec_context
* and eventually up to the application program and over to the server.
*
* Use DER rules, definite length method per RFC 2478
*/
/*
 * Build the initiator-side NegTokenInit message into outbuf.  First
 * computes every DER element size bottom-up, then emits the framing and
 * fields in the same order; the two passes must agree exactly, since
 * the buffer is allocated to the computed total.  Returns 0 on success,
 * nonzero on failure (outbuf is then left empty).
 */
static int
make_spnego_tokenInit_msg(spnego_gss_ctx_id_t spnego_ctx,
int negHintsCompat,
gss_buffer_t mechListMIC, OM_uint32 req_flags,
gss_buffer_t data, send_token_flag sendtoken,
gss_buffer_t outbuf)
{
int ret = 0;
unsigned int tlen, dataLen = 0;
unsigned int negTokenInitSize = 0;
unsigned int negTokenInitSeqSize = 0;
unsigned int negTokenInitContSize = 0;
unsigned int rspTokenSize = 0;
unsigned int mechListTokenSize = 0;
unsigned int micTokenSize = 0;
unsigned char *t;
unsigned char *ptr;
if (outbuf == GSS_C_NO_BUFFER)
return (-1);
outbuf->length = 0;
outbuf->value = NULL;
/* calculate the data length */
/*
 * 0xa0 [DER LEN] [mechTypes]
 */
mechListTokenSize = 1 +
gssint_der_length_size(spnego_ctx->DER_mechTypes.length) +
spnego_ctx->DER_mechTypes.length;
dataLen += mechListTokenSize;
/*
 * If a token from gss_init_sec_context exists,
 * add the length of the token + the ASN.1 overhead
 */
if (data != NULL) {
/*
 * Encoded in final output as:
 * 0xa2 [DER LEN] 0x04 [DER LEN] [DATA]
 * -----s--------|--------s2----------
 */
rspTokenSize = 1 +
gssint_der_length_size(data->length) +
data->length;
dataLen += 1 + gssint_der_length_size(rspTokenSize) +
rspTokenSize;
}
if (mechListMIC) {
/*
 * Encoded in final output as:
 * 0xa3 [DER LEN] 0x04 [DER LEN] [DATA]
 * --s-- -----tlen------------
 */
micTokenSize = 1 +
gssint_der_length_size(mechListMIC->length) +
mechListMIC->length;
dataLen += 1 +
gssint_der_length_size(micTokenSize) +
micTokenSize;
}
/*
 * Add size of DER encoding
 * [ SEQUENCE { MechTypeList | ReqFLags | Token | mechListMIC } ]
 * 0x30 [DER_LEN] [data]
 *
 */
negTokenInitContSize = dataLen;
negTokenInitSeqSize = 1 + gssint_der_length_size(dataLen) + dataLen;
dataLen = negTokenInitSeqSize;
/*
 * negTokenInitSize indicates the bytes needed to
 * hold the ASN.1 encoding of the entire NegTokenInit
 * SEQUENCE.
 * 0xa0 [DER_LEN] + data
 *
 */
negTokenInitSize = 1 +
gssint_der_length_size(negTokenInitSeqSize) +
negTokenInitSeqSize;
tlen = g_token_size(gss_mech_spnego, negTokenInitSize);
t = (unsigned char *) gssalloc_malloc(tlen);
if (t == NULL) {
return (-1);
}
ptr = t;
/* create the message */
if ((ret = g_make_token_header(gss_mech_spnego, negTokenInitSize,
&ptr, tlen)))
goto errout;
*ptr++ = CONTEXT; /* NegotiationToken identifier */
if ((ret = gssint_put_der_length(negTokenInitSeqSize, &ptr, tlen)))
goto errout;
*ptr++ = SEQUENCE;
if ((ret = gssint_put_der_length(negTokenInitContSize, &ptr,
tlen - (int)(ptr-t))))
goto errout;
*ptr++ = CONTEXT | 0x00; /* MechTypeList identifier */
if ((ret = gssint_put_der_length(spnego_ctx->DER_mechTypes.length,
&ptr, tlen - (int)(ptr-t))))
goto errout;
/* We already encoded the MechSetList */
(void) memcpy(ptr, spnego_ctx->DER_mechTypes.value,
spnego_ctx->DER_mechTypes.length);
ptr += spnego_ctx->DER_mechTypes.length;
/* Optional [2] mechToken: the inner mechanism's initial token. */
if (data != NULL) {
*ptr++ = CONTEXT | 0x02;
if ((ret = gssint_put_der_length(rspTokenSize,
&ptr, tlen - (int)(ptr - t))))
goto errout;
if ((ret = put_input_token(&ptr, data,
tlen - (int)(ptr - t))))
goto errout;
}
/* Optional [3] mechListMIC (or negHints for old-MS compatibility). */
if (mechListMIC != GSS_C_NO_BUFFER) {
*ptr++ = CONTEXT | 0x03;
if ((ret = gssint_put_der_length(micTokenSize,
&ptr, tlen - (int)(ptr - t))))
goto errout;
if (negHintsCompat) {
ret = put_neg_hints(&ptr, mechListMIC,
tlen - (int)(ptr - t));
if (ret)
goto errout;
} else if ((ret = put_input_token(&ptr, mechListMIC,
tlen - (int)(ptr - t))))
goto errout;
}
errout:
if (ret != 0) {
if (t)
free(t);
t = NULL;
tlen = 0;
}
outbuf->length = tlen;
outbuf->value = (void *) t;
return (ret);
}
/*
* create the server side spnego token passed back to
* gss_accept_sec_context and eventually up to the application program
* and over to the client.
*/
/*
 * Build the acceptor-side NegTokenTarg message into outbuf.  As with
 * the init message, element sizes are computed first and the buffer is
 * allocated to the exact total, then the fields are emitted in DER
 * order: negResult, optional supportedMech (first token only), optional
 * responseToken, optional mechListMIC.  Returns GSS_S_COMPLETE on
 * success; on failure outbuf is left empty.
 */
static int
make_spnego_tokenTarg_msg(OM_uint32 status, gss_OID mech_wanted,
gss_buffer_t data, gss_buffer_t mechListMIC,
send_token_flag sendtoken,
gss_buffer_t outbuf)
{
unsigned int tlen = 0;
unsigned int ret = 0;
unsigned int NegTokenTargSize = 0;
unsigned int NegTokenSize = 0;
unsigned int rspTokenSize = 0;
unsigned int micTokenSize = 0;
unsigned int dataLen = 0;
unsigned char *t;
unsigned char *ptr;
if (outbuf == GSS_C_NO_BUFFER)
return (GSS_S_DEFECTIVE_TOKEN);
/* The first reply must name the selected mechanism. */
if (sendtoken == INIT_TOKEN_SEND && mech_wanted == GSS_C_NO_OID)
return (GSS_S_DEFECTIVE_TOKEN);
outbuf->length = 0;
outbuf->value = NULL;
/*
 * ASN.1 encoding of the negResult
 * ENUMERATED type is 3 bytes
 * ENUMERATED TAG, Length, Value,
 * Plus 2 bytes for the CONTEXT id and length.
 */
dataLen = 5;
/*
 * calculate data length
 *
 * If this is the initial token, include length of
 * mech_type and the negotiation result fields.
 */
if (sendtoken == INIT_TOKEN_SEND) {
int mechlistTokenSize;
/*
 * 1 byte for the CONTEXT ID(0xa0),
 * 1 byte for the OID ID(0x06)
 * 1 byte for OID Length field
 * Plus the rest... (OID Length, OID value)
 */
mechlistTokenSize = 3 + mech_wanted->length +
gssint_der_length_size(mech_wanted->length);
dataLen += mechlistTokenSize;
}
if (data != NULL && data->length > 0) {
/* Length of the inner token */
rspTokenSize = 1 + gssint_der_length_size(data->length) +
data->length;
dataLen += rspTokenSize;
/* Length of the outer token */
dataLen += 1 + gssint_der_length_size(rspTokenSize);
}
if (mechListMIC != NULL) {
/* Length of the inner token */
micTokenSize = 1 + gssint_der_length_size(mechListMIC->length) +
mechListMIC->length;
dataLen += micTokenSize;
/* Length of the outer token */
dataLen += 1 + gssint_der_length_size(micTokenSize);
}
/*
 * Add size of DER encoded:
 * NegTokenTarg [ SEQUENCE ] of
 * NegResult[0] ENUMERATED {
 * accept_completed(0),
 * accept_incomplete(1),
 * reject(2) }
 * supportedMech [1] MechType OPTIONAL,
 * responseToken [2] OCTET STRING OPTIONAL,
 * mechListMIC [3] OCTET STRING OPTIONAL
 *
 * size = data->length + MechListMic + SupportedMech len +
 * Result Length + ASN.1 overhead
 */
NegTokenTargSize = dataLen;
dataLen += 1 + gssint_der_length_size(NegTokenTargSize);
/*
 * NegotiationToken [ CHOICE ]{
 * negTokenInit [0] NegTokenInit,
 * negTokenTarg [1] NegTokenTarg }
 */
NegTokenSize = dataLen;
dataLen += 1 + gssint_der_length_size(NegTokenSize);
tlen = dataLen;
t = (unsigned char *) gssalloc_malloc(tlen);
if (t == NULL) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto errout;
}
ptr = t;
/*
 * Indicate that we are sending CHOICE 1
 * (NegTokenTarg)
 */
*ptr++ = CONTEXT | 0x01;
if (gssint_put_der_length(NegTokenSize, &ptr, dataLen) < 0) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto errout;
}
*ptr++ = SEQUENCE;
if (gssint_put_der_length(NegTokenTargSize, &ptr,
tlen - (int)(ptr-t)) < 0) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto errout;
}
/*
 * First field of the NegTokenTarg SEQUENCE
 * is the ENUMERATED NegResult.
 */
*ptr++ = CONTEXT;
if (gssint_put_der_length(3, &ptr,
tlen - (int)(ptr-t)) < 0) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto errout;
}
if (put_negResult(&ptr, status, tlen - (int)(ptr - t)) < 0) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto errout;
}
if (sendtoken == INIT_TOKEN_SEND) {
/*
 * Next, is the Supported MechType
 */
*ptr++ = CONTEXT | 0x01;
if (gssint_put_der_length(mech_wanted->length + 2,
&ptr,
tlen - (int)(ptr - t)) < 0) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto errout;
}
if (put_mech_oid(&ptr, mech_wanted,
tlen - (int)(ptr - t)) < 0) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto errout;
}
}
/* Optional [2] responseToken from the inner mechanism. */
if (data != NULL && data->length > 0) {
*ptr++ = CONTEXT | 0x02;
if (gssint_put_der_length(rspTokenSize, &ptr,
tlen - (int)(ptr - t)) < 0) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto errout;
}
if (put_input_token(&ptr, data,
tlen - (int)(ptr - t)) < 0) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto errout;
}
}
/* Optional [3] mechListMIC. */
if (mechListMIC != NULL) {
*ptr++ = CONTEXT | 0x03;
if (gssint_put_der_length(micTokenSize, &ptr,
tlen - (int)(ptr - t)) < 0) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto errout;
}
if (put_input_token(&ptr, mechListMIC,
tlen - (int)(ptr - t)) < 0) {
ret = GSS_S_DEFECTIVE_TOKEN;
goto errout;
}
}
ret = GSS_S_COMPLETE;
errout:
if (ret != GSS_S_COMPLETE) {
if (t)
free(t);
} else {
outbuf->length = ptr - t;
outbuf->value = (void *) t;
}
return (ret);
}
/* determine size of token */
/*
 * Return the total byte count of a GSS-API framed token carrying
 * body_size payload bytes: the 0x60 application tag, its DER length,
 * the mech OID element (0x06 [len] [oid]), and the body itself.
 */
static int
g_token_size(gss_OID_const mech, unsigned int body_size)
{
	int size;

	/* Mechanism OID element: 0x06 [MECHLENFIELD] MECHDATA. */
	size = 1 + gssint_der_length_size(mech->length) + mech->length;
	/* Outer framing: 0x60 [DER_LEN] wrapping OID element plus body. */
	size += 1 + gssint_der_length_size(body_size + size);
	return (size + body_size);
}
/*
* generate token header.
*
* Use DER Definite Length method per RFC2478
* Use of indefinite length encoding will not be compatible
* with Microsoft or others that actually follow the spec.
*/
/*
 * Emit the GSS-API token framing (0x60 [len] 0x06 [len] [mech oid])
 * into *buf, advancing it.  body_size is the payload length that will
 * follow; totallen is the total space available at the original *buf.
 * Uses DER definite-length encoding per RFC 2478 for interoperability.
 * Returns 0 on success, nonzero on encoding failure.
 */
static int
g_make_token_header(gss_OID_const mech,
		    unsigned int body_size,
		    unsigned char **buf,
		    unsigned int totallen)
{
	int ret = 0;
	unsigned int hdrsize;
	unsigned char *p = *buf;

	hdrsize = 1 + gssint_der_length_size(mech->length) + mech->length;

	*(*buf)++ = HEADER_ID;
	if ((ret = gssint_put_der_length(hdrsize + body_size, buf, totallen)))
		return (ret);

	*(*buf)++ = MECH_OID;
	/*
	 * Remaining space is the total minus the bytes already written.
	 * The original computed (p - *buf), a negative quantity, which
	 * overstated the space and made this bounds check ineffective.
	 */
	if ((ret = gssint_put_der_length(mech->length, buf,
					 totallen - (unsigned int)(*buf - p))))
		return (ret);
	TWRITE_STR(*buf, mech->elements, mech->length);
	return (0);
}
/*
* NOTE: This checks that the length returned by
* gssint_get_der_length() is not greater than the number of octets
* remaining, even though gssint_get_der_length() already checks, in
* theory.
*/
/*
 * If *buf starts with TAG followed by a valid DER length that fits in
 * the remaining buflen bytes, set *outlen to that length, advance *buf
 * past the tag and length octets, and return 0.  Returns -1 otherwise.
 * NOTE(review): on failure *buf may still have been advanced, and if
 * gssint_get_der_length returned -1 then *outlen holds (unsigned)-1 --
 * callers must check the return value before trusting *outlen.
 */
static int
g_get_tag_and_length(unsigned char **buf, int tag,
unsigned int buflen, unsigned int *outlen)
{
unsigned char *ptr = *buf;
int ret = -1; /* pessimists, assume failure ! */
unsigned int encoded_len;
int tmplen = 0;
*outlen = 0;
/* Need at least the tag byte plus one length byte. */
if (buflen > 1 && *ptr == tag) {
ptr++;
tmplen = gssint_get_der_length(&ptr, buflen - 1,
&encoded_len);
if (tmplen < 0) {
ret = -1;
} else if ((unsigned int)tmplen > buflen - (ptr - *buf)) {
/* Declared content length exceeds what remains. */
ret = -1;
} else
ret = 0;
}
*outlen = tmplen;
*buf = ptr;
return (ret);
}
/*
 * Validate the outer NegTokenInit framing at *buf_in: the [0] CONTEXT
 * wrapper, the inner SEQUENCE, and the [0] mechTypes wrapper.  On
 * success advances *buf_in to the start of the DER-encoded mech type
 * list and returns 0; returns G_BAD_TOK_HEADER on any framing error.
 */
static int
g_verify_neg_token_init(unsigned char **buf_in, unsigned int cur_size)
{
unsigned char *buf = *buf_in;
unsigned char *endptr = buf + cur_size;
int seqsize;
int ret = 0;
unsigned int bytes;
/*
 * Verify this is a NegotiationToken type token
 * - check for a0 (context specific identifier)
 * - get length and verify that enough data exists
 */
if (g_get_tag_and_length(&buf, CONTEXT, cur_size, &bytes) < 0)
return (G_BAD_TOK_HEADER);
cur_size = bytes; /* should indicate bytes remaining */
/*
 * Verify the next piece, it should identify this as
 * a strucure of type NegTokenInit.
 */
if (*buf++ == SEQUENCE) {
if ((seqsize = gssint_get_der_length(&buf, cur_size, &bytes)) < 0)
return (G_BAD_TOK_HEADER);
/*
 * Make sure we have the entire buffer as described
 */
if (seqsize > endptr - buf)
return (G_BAD_TOK_HEADER);
} else {
return (G_BAD_TOK_HEADER);
}
cur_size = seqsize; /* should indicate bytes remaining */
/*
 * Verify that the first blob is a sequence of mechTypes
 */
if (*buf++ == CONTEXT) {
if ((seqsize = gssint_get_der_length(&buf, cur_size, &bytes)) < 0)
return (G_BAD_TOK_HEADER);
/*
 * Make sure we have the entire buffer as described
 */
if (seqsize > endptr - buf)
return (G_BAD_TOK_HEADER);
} else {
return (G_BAD_TOK_HEADER);
}
/*
 * At this point, *buf should be at the beginning of the
 * DER encoded list of mech types that are to be negotiated.
 */
*buf_in = buf;
return (ret);
}
/* verify token header. */
/*
 * Verify the GSS-API framing (0x60 [len] 0x06 [len] [mech oid]) at
 * *buf_in against the expected MECH oid.  On success advances *buf_in
 * past the header and sets *body_size to the remaining payload length.
 * Returns 0, G_BAD_TOK_HEADER for malformed framing, or G_WRONG_MECH
 * when the framing is valid but the OID differs.
 */
static int
g_verify_token_header(gss_OID_const mech,
unsigned int *body_size,
unsigned char **buf_in,
int tok_type,
unsigned int toksize)
{
unsigned char *buf = *buf_in;
int seqsize;
gss_OID_desc toid;
int ret = 0;
unsigned int bytes;
if (toksize-- < 1)
return (G_BAD_TOK_HEADER);
if (*buf++ != HEADER_ID)
return (G_BAD_TOK_HEADER);
if ((seqsize = gssint_get_der_length(&buf, toksize, &bytes)) < 0)
return (G_BAD_TOK_HEADER);
/* The declared length must account for the whole remaining token. */
if ((seqsize + bytes) != toksize)
return (G_BAD_TOK_HEADER);
if (toksize-- < 1)
return (G_BAD_TOK_HEADER);
if (*buf++ != MECH_OID)
return (G_BAD_TOK_HEADER);
if (toksize-- < 1)
return (G_BAD_TOK_HEADER);
toid.length = *buf++;
if (toksize < toid.length)
return (G_BAD_TOK_HEADER);
else
toksize -= toid.length;
toid.elements = buf;
buf += toid.length;
if (!g_OID_equal(&toid, mech))
ret = G_WRONG_MECH;
/*
 * G_WRONG_MECH is not returned immediately because it's more important
 * to return G_BAD_TOK_HEADER if the token header is in fact bad
 */
if (toksize < 2)
return (G_BAD_TOK_HEADER);
else
toksize -= 2;
/* Only commit the output values when the mech OID matched. */
if (!ret) {
*buf_in = buf;
*body_size = toksize;
}
return (ret);
}
/*
* Return non-zero if the oid is one of the kerberos mech oids,
* otherwise return zero.
*
* N.B. There are 3 oids that represent the kerberos mech:
* RFC-specified GSS_MECH_KRB5_OID,
* Old pre-RFC GSS_MECH_KRB5_OLD_OID,
* Incorrect MS GSS_MECH_KRB5_WRONG_OID
*/
/*
 * Return nonzero if OID is any of the three OIDs that denote the
 * Kerberos mechanism (RFC OID, old pre-RFC OID, or the incorrect
 * Microsoft OID), zero otherwise.
 */
static int
is_kerb_mech(gss_OID oid)
{
	extern const gss_OID_set_desc * const gss_mech_set_krb5_both;
	OM_uint32 minor;
	int present = 0;

	(void) gss_test_oid_set_member(&minor, oid,
				       (gss_OID_set)gss_mech_set_krb5_both,
				       &present);
	return (present);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_2199_0 |
crossvul-cpp_data_good_5108_3 | /* packet-usb-video.c
*
* Forked from packet-usb-masstorage.c 35224 2010-12-20 05:35:29Z guy
* which was authored by Ronnie Sahlberg (2006)
*
* usb video dissector
* Steven J. Magnani 2013
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "config.h"
#include <epan/packet.h>
#include <epan/expert.h>
#include "packet-usb.h"
void proto_register_usb_vid(void);
void proto_reg_handoff_usb_vid(void);
/* References are to sections in USB Video Class specifications -
* specifically V1.5, but versions have tended to keep
* the same numbering (as of this writing).
*
* http://www.usb.org/developers/devclass_docs/USB_Video_Class_1_5.zip
*/
/* Table 2-1. Interrupt originators */
#define INT_VIDEOCONTROL 1
#define INT_VIDEOSTREAMING 2
#define INT_ORIGINATOR_MASK 0xF
/* Table 2-2. Video Control Status Packet bAttribute */
#define CONTROL_CHANGE_VALUE 0x00
#define CONTROL_CHANGE_INFO 0x01
#define CONTROL_CHANGE_FAILURE 0x02
#define CONTROL_CHANGE_MIN 0x03 /* UVC 1.5+ */
#define CONTROL_CHANGE_MAX 0x04 /* UVC 1.5+ */
/* A.2 Video Interface Subclass Codes */
#define SC_UNDEFINED 0
#define SC_VIDEOCONTROL 1
#define SC_VIDEOSTREAMING 2
#define SC_VIDEO_INTERFACE_COLLECTION 3
/* A.4. Video Class-Specific Descriptor Types */
#define CS_INTERFACE 0x24
#define CS_ENDPOINT 0x25
/* A.5 Video Class-Specific VC Interface Descriptor Subtypes */
#define VC_HEADER 1
#define VC_INPUT_TERMINAL 2
#define VC_OUTPUT_TERMINAL 3
#define VC_SELECTOR_UNIT 4
#define VC_PROCESSING_UNIT 5
#define VC_EXTENSION_UNIT 6
#define VC_ENCODING_UNIT 7
/* A.6 Video Class-Specific VS Interface Descriptor Subtypes */
#define VS_UNDEFINED 0x00
#define VS_INPUT_HEADER 0x01
#define VS_OUTPUT_HEADER 0x02
#define VS_STILL_IMAGE_FRAME 0x03
#define VS_FORMAT_UNCOMPRESSED 0x04
#define VS_FRAME_UNCOMPRESSED 0x05
#define VS_FORMAT_MJPEG 0x06
#define VS_FRAME_MJPEG 0x07
#define VS_FORMAT_MPEG1 0x08 /* Pre-UVC 1.1 */
#define VS_FORMAT_MPEG2PS 0x09 /* Pre-UVC 1.1 */
#define VS_FORMAT_MPEG2TS 0x0A
#define VS_FORMAT_MPEG4SL 0x0B /* Pre-UVC 1.1 */
#define VS_FORMAT_DV 0x0C
#define VS_COLORFORMAT 0x0D
#define VS_FORMAT_VENDOR 0x0E /* Pre-UVC 1.1 */
#define VS_FRAME_VENDOR 0x0F /* Pre-UVC 1.1 */
#define VS_FORMAT_FRAME_BASED 0x10
#define VS_FRAME_FRAME_BASED 0x11
#define VS_FORMAT_STREAM_BASED 0x12
#define VS_FORMAT_H264 0x13 /* UVC 1.5 */
#define VS_FRAME_H264 0x14 /* UVC 1.5 */
#define VS_FORMAT_H264_SIMULCAST 0x15 /* UVC 1.5 */
#define VS_FORMAT_VP8 0x16 /* UVC 1.5 */
#define VS_FRAME_VP8 0x17 /* UVC 1.5 */
#define VS_FORMAT_VP8_SIMULCAST 0x18 /* UVC 1.5 */
/* A.7 Video Class-Specific Endpoint Descriptor Subtypes */
#define EP_INTERRUPT 0x03
/* A.9.1 Video Control Interface Control Selectors */
#define VC_CONTROL_UNDEFINED 0x00
#define VC_VIDEO_POWER_MODE_CONTROL 0x01
#define VC_REQUEST_ERROR_CODE_CONTROL 0x02
#define VC_REQUEST_INDICATE_HOST_CLOCK_CONTROL 0x03 /* Pre-UVC 1.1 */
/* A.9.3 Selector Unit Control Selectors */
#define SU_CONTROL_UNDEFINED 0x00
#define SU_INPUT_SELECT_CONTROL 0x01
/* A.9.4 Camera Terminal Control Selectors */
#define CT_CONTROL_UNDEFINED 0x00
#define CT_SCANNING_MODE_CONTROL 0x01
#define CT_AE_MODE_CONTROL 0x02
#define CT_AE_PRIORITY_CONTROL 0x03
#define CT_EXPOSURE_TIME_ABSOLUTE_CONTROL 0x04
#define CT_EXPOSURE_TIME_RELATIVE_CONTROL 0x05
#define CT_FOCUS_ABSOLUTE_CONTROL 0x06
#define CT_FOCUS_RELATIVE_CONTROL 0x07
#define CT_FOCUS_AUTO_CONTROL 0x08
#define CT_IRIS_ABSOLUTE_CONTROL 0x09
#define CT_IRIS_RELATIVE_CONTROL 0x0A
#define CT_ZOOM_ABSOLUTE_CONTROL 0x0B
#define CT_ZOOM_RELATIVE_CONTROL 0x0C
#define CT_PANTILT_ABSOLUTE_CONTROL 0x0D
#define CT_PANTILT_RELATIVE_CONTROL 0x0E
#define CT_ROLL_ABSOLUTE_CONTROL 0x0F
#define CT_ROLL_RELATIVE_CONTROL 0x10
#define CT_PRIVACY_CONTROL 0x11
#define CT_FOCUS_SIMPLE_CONTROL 0x12 /* UVC 1.5 */
#define CT_WINDOW_CONTROL 0x13 /* UVC 1.5 */
#define CT_REGION_OF_INTEREST_CONTROL 0x14 /* UVC 1.5 */
/* A.9.5 Processing Unit Control Selectors */
#define PU_CONTROL_UNDEFINED 0x00
#define PU_BACKLIGHT_COMPENSATION_CONTROL 0x01
#define PU_BRIGHTNESS_CONTROL 0x02
#define PU_CONTRAST_CONTROL 0x03
#define PU_GAIN_CONTROL 0x04
#define PU_POWER_LINE_FREQUENCY_CONTROL 0x05
#define PU_HUE_CONTROL 0x06
#define PU_SATURATION_CONTROL 0x07
#define PU_SHARPNESS_CONTROL 0x08
#define PU_GAMMA_CONTROL 0x09
#define PU_WHITE_BALANCE_TEMPERATURE_CONTROL 0x0A
#define PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL 0x0B
#define PU_WHITE_BALANCE_COMPONENT_CONTROL 0x0C
#define PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL 0x0D
#define PU_DIGITAL_MULTIPLIER_CONTROL 0x0E
#define PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL 0x0F
#define PU_HUE_AUTO_CONTROL 0x10
#define PU_ANALOG_VIDEO_STANDARD_CONTROL 0x11
#define PU_ANALOG_LOCK_STATUS_CONTROL 0x12
#define PU_CONTRAST_AUTO_CONTROL 0x13
/* A.9.7 VideoStreaming Interface Control Selectors */
#define VS_CONTROL_UNDEFINED 0x00
#define VS_PROBE_CONTROL 0x01
#define VS_COMMIT_CONTROL 0x02
#define VS_STILL_PROBE_CONTROL 0x03
#define VS_STILL_COMMIT_CONTROL 0x04
#define VS_STILL_IMAGE_TRIGGER_CONTROL 0x05
#define VS_STREAM_ERROR_CODE_CONTROL 0x06
#define VS_GENERATE_KEY_FRAME_CONTROL 0x07
#define VS_UPDATE_FRAME_SEGMENT_CONTROL 0x08
#define VS_SYNCH_DELAY_CONTROL 0x09
/* Appendix B Terminal Types */
#define TT_VENDOR_SPECIFIC 0x100
#define TT_STREAMING 0x101
#define ITT_VENDOR_SPECIFIC 0x200
#define ITT_CAMERA 0x201
#define ITT_MEDIA_TRANSPORT_INPUT 0x202
#define OTT_VENDOR_SPECIFIC 0x300
#define OTT_DISPLAY 0x301
#define OTT_MEDIA_TRANSPORT_OUTPUT 0x302
#define EXTERNAL_VENDOR_SPECIFIC 0x400
#define COMPOSITE_CONNECTOR 0x401
#define SVIDEO_CONNECTOR 0x402
#define COMPONENT_CONNECTOR 0x403
/* Table 2-2 Status Packet Format (VideoControl Interface as the Originator) */
#define CONTROL_INTERRUPT_EVENT_CONTROL_CHANGE 0
/* Table 4-7 Request Error Code Control bRequestErrorCode */
#define UVC_ERROR_NONE 0
#define UVC_ERROR_NOT_READY 1
#define UVC_ERROR_WRONG_STATE 2
#define UVC_ERROR_POWER 3
#define UVC_ERROR_OUT_OF_RANGE 4
#define UVC_ERROR_INVALID_UNIT 5
#define UVC_ERROR_INVALID_CONTROL 6
#define UVC_ERROR_INVALID_REQUEST 7
#define UVC_ERROR_INVALID_VALUE 8
#define UVC_ERROR_UNKNOWN 255
/* A.8 Video Class-Specific Request Codes */
#define USB_SETUP_SET_CUR 0x01
#define USB_SETUP_SET_CUR_ALL 0x11 /* UVC 1.5 */
#define USB_SETUP_GET_CUR 0x81
#define USB_SETUP_GET_MIN 0x82
#define USB_SETUP_GET_MAX 0x83
#define USB_SETUP_GET_RES 0x84
#define USB_SETUP_GET_LEN 0x85
#define USB_SETUP_GET_INFO 0x86
#define USB_SETUP_GET_DEF 0x87
#define USB_SETUP_GET_CUR_ALL 0x91 /* UVC 1.5 */
#define USB_SETUP_GET_MIN_ALL 0x92 /* UVC 1.5 */
#define USB_SETUP_GET_MAX_ALL 0x93 /* UVC 1.5 */
#define USB_SETUP_GET_RES_ALL 0x94 /* UVC 1.5 */
#define USB_SETUP_GET_DEF_ALL 0x97 /* UVC 1.5 */
/* protocols and header fields */
static int proto_usb_vid = -1;
static int hf_usb_vid_control_entity = -1;
static int hf_usb_vid_control_interface = -1;
static int hf_usb_vid_control_selector = -1;
static int hf_usb_vid_epdesc_subtype = -1;
static int hf_usb_vid_epdesc_max_transfer_sz = -1;
static int hf_usb_vid_control_ifdesc_subtype = -1;
static int hf_usb_vid_control_ifdesc_terminal_id = -1;
static int hf_usb_vid_control_ifdesc_terminal_type = -1;
static int hf_usb_vid_control_ifdesc_assoc_terminal = -1;
static int hf_usb_vid_streaming_ifdesc_subtype = -1;
static int hf_usb_vid_streaming_ifdesc_bNumFormats = -1;
static int hf_usb_vid_control_ifdesc_unit_id = -1;
static int hf_usb_vid_request = -1;
static int hf_usb_vid_length = -1;
static int hf_usb_vid_interrupt_bStatusType = -1;
static int hf_usb_vid_interrupt_bOriginator = -1;
static int hf_usb_vid_interrupt_bAttribute = -1;
static int hf_usb_vid_control_interrupt_bEvent = -1;
static int hf_usb_vid_control_ifdesc_bcdUVC = -1;
static int hf_usb_vid_ifdesc_wTotalLength = -1;
static int hf_usb_vid_control_ifdesc_dwClockFrequency = -1;
static int hf_usb_vid_control_ifdesc_bInCollection = -1;
static int hf_usb_vid_control_ifdesc_baInterfaceNr = -1;
static int hf_usb_vid_control_ifdesc_iTerminal = -1;
static int hf_usb_vid_control_ifdesc_src_id = -1;
static int hf_usb_vid_cam_objective_focal_len_min = -1;
static int hf_usb_vid_cam_objective_focal_len_max = -1;
static int hf_usb_vid_cam_ocular_focal_len = -1;
static int hf_usb_vid_bControlSize = -1;
static int hf_usb_vid_bmControl = -1;
static int hf_usb_vid_bmControl_bytes = -1;
static int hf_usb_vid_control_default = -1;
static int hf_usb_vid_control_min = -1;
static int hf_usb_vid_control_max = -1;
static int hf_usb_vid_control_res = -1;
static int hf_usb_vid_control_cur = -1;
static int hf_usb_vid_control_info = -1;
static int hf_usb_vid_control_info_D[7] = { -1, -1, -1, -1, -1, -1, -1 };
static int hf_usb_vid_control_length = -1;
static int hf_usb_vid_cam_control_D[22] = { -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1 };
static int hf_usb_vid_proc_control_D[19] = { -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1 };
static int hf_usb_vid_proc_standards_D[6] = { -1, -1, -1, -1, -1, -1 };
static int hf_usb_vid_exten_guid = -1;
static int hf_usb_vid_exten_num_controls = -1;
static int hf_usb_vid_num_inputs = -1;
static int hf_usb_vid_sources = -1;
static int hf_usb_vid_streaming_bmInfo = -1;
static int hf_usb_vid_streaming_info_D[1] = { -1 };
static int hf_usb_vid_streaming_terminal_link = -1;
static int hf_usb_vid_streaming_still_capture_method = -1;
static int hf_usb_vid_streaming_trigger_support = -1;
static int hf_usb_vid_streaming_trigger_usage = -1;
static int hf_usb_vid_streaming_control_D[6] = { -1, -1, -1, -1, -1, -1 };
static int hf_usb_vid_format_index = -1;
static int hf_usb_vid_format_num_frame_descriptors = -1;
static int hf_usb_vid_format_guid = -1;
static int hf_usb_vid_format_bits_per_pixel = -1;
static int hf_usb_vid_default_frame_index = -1;
static int hf_usb_vid_aspect_ratio_x = -1;
static int hf_usb_vid_aspect_ratio_y = -1;
static int hf_usb_vid_interlace_flags = -1;
static int hf_usb_vid_is_interlaced = -1;
static int hf_usb_vid_interlaced_fields = -1;
static int hf_usb_vid_field_1_first = -1;
static int hf_usb_vid_field_pattern = -1;
static int hf_usb_vid_copy_protect = -1;
static int hf_usb_vid_variable_size = -1;
static int hf_usb_vid_frame_index = -1;
static int hf_usb_vid_frame_capabilities = -1;
static int hf_usb_vid_frame_stills_supported = -1;
static int hf_usb_vid_frame_fixed_frame_rate = -1;
static int hf_usb_vid_frame_width = -1;
static int hf_usb_vid_frame_height = -1;
static int hf_usb_vid_frame_min_bit_rate = -1;
static int hf_usb_vid_frame_max_bit_rate = -1;
static int hf_usb_vid_frame_max_frame_sz = -1;
static int hf_usb_vid_frame_default_interval = -1;
static int hf_usb_vid_frame_bytes_per_line = -1;
static int hf_usb_vid_mjpeg_flags = -1;
static int hf_usb_vid_mjpeg_fixed_samples = -1;
static int hf_usb_vid_probe_hint = -1;
static int hf_usb_vid_probe_hint_D[5] = { -1, -1, -1, -1, -1 };
static int hf_usb_vid_frame_interval = -1;
static int hf_usb_vid_probe_key_frame_rate = -1;
static int hf_usb_vid_probe_p_frame_rate = -1;
static int hf_usb_vid_probe_comp_quality = -1;
static int hf_usb_vid_probe_comp_window = -1;
static int hf_usb_vid_probe_delay = -1;
static int hf_usb_vid_probe_max_frame_sz = -1;
static int hf_usb_vid_probe_max_payload_sz = -1;
static int hf_usb_vid_probe_clock_freq = -1;
static int hf_usb_vid_probe_framing = -1;
static int hf_usb_vid_probe_framing_D[2] = { -1, -1 };
static int hf_usb_vid_probe_preferred_ver = -1;
static int hf_usb_vid_probe_min_ver = -1;
static int hf_usb_vid_probe_max_ver = -1;
static int hf_usb_vid_frame_interval_type = -1;
static int hf_usb_vid_frame_min_interval = -1;
static int hf_usb_vid_frame_max_interval = -1;
static int hf_usb_vid_frame_step_interval = -1;
static int hf_usb_vid_color_primaries = -1;
static int hf_usb_vid_transfer_characteristics = -1;
static int hf_usb_vid_matrix_coefficients = -1;
static int hf_usb_vid_max_multiplier = -1;
static int hf_usb_vid_iProcessing = -1;
static int hf_usb_vid_iExtension = -1;
static int hf_usb_vid_iSelector = -1;
static int hf_usb_vid_proc_standards = -1;
static int hf_usb_vid_request_error = -1;
static int hf_usb_vid_descriptor_data = -1;
static int hf_usb_vid_control_data = -1;
static int hf_usb_vid_control_value = -1;
static int hf_usb_vid_value_data = -1;
/* Subtrees */
static gint ett_usb_vid = -1;
static gint ett_descriptor_video_endpoint = -1;
static gint ett_descriptor_video_control = -1;
static gint ett_descriptor_video_streaming = -1;
static gint ett_camera_controls = -1;
static gint ett_processing_controls = -1;
static gint ett_streaming_controls = -1;
static gint ett_streaming_info = -1;
static gint ett_interlace_flags = -1;
static gint ett_frame_capability_flags = -1;
static gint ett_mjpeg_flags = -1;
static gint ett_video_probe = -1;
static gint ett_probe_hint = -1;
static gint ett_probe_framing = -1;
static gint ett_video_standards = -1;
static gint ett_control_capabilities = -1;
static expert_field ei_usb_vid_subtype_unknown = EI_INIT;
static expert_field ei_usb_vid_bitmask_len = EI_INIT;
/* Lookup tables */
static const value_string vc_ep_descriptor_subtypes[] = {
{ EP_INTERRUPT, "Interrupt" },
{ 0, NULL }
};
static const value_string vid_descriptor_type_vals[] = {
{CS_INTERFACE, "video class interface"},
{CS_ENDPOINT, "video class endpoint"},
{0,NULL}
};
static value_string_ext vid_descriptor_type_vals_ext =
VALUE_STRING_EXT_INIT(vid_descriptor_type_vals);
static const value_string vc_if_descriptor_subtypes[] = {
{ VC_HEADER, "Header" },
{ VC_INPUT_TERMINAL, "Input Terminal" },
{ VC_OUTPUT_TERMINAL, "Output Terminal" },
{ VC_SELECTOR_UNIT, "Selector Unit" },
{ VC_PROCESSING_UNIT, "Processing Unit" },
{ VC_EXTENSION_UNIT, "Extension Unit" },
{ VC_ENCODING_UNIT, "Encoding Unit" },
{ 0, NULL }
};
static value_string_ext vc_if_descriptor_subtypes_ext =
VALUE_STRING_EXT_INIT(vc_if_descriptor_subtypes);
static const value_string cs_control_interface[] = {
{ VC_CONTROL_UNDEFINED, "Undefined" },
{ VC_VIDEO_POWER_MODE_CONTROL, "Video Power Mode" },
{ VC_REQUEST_ERROR_CODE_CONTROL, "Request Error Code" },
{ VC_REQUEST_INDICATE_HOST_CLOCK_CONTROL, "Request Indicate Host Clock" },
{ 0, NULL }
};
static value_string_ext cs_control_interface_ext =
VALUE_STRING_EXT_INIT(cs_control_interface);
static const value_string cs_streaming_interface[] = {
{ VS_CONTROL_UNDEFINED, "Undefined" },
{ VS_PROBE_CONTROL, "Probe" },
{ VS_COMMIT_CONTROL, "Commit" },
{ VS_STILL_PROBE_CONTROL, "Still Probe" },
{ VS_STILL_COMMIT_CONTROL, "Still Commit" },
{ VS_STILL_IMAGE_TRIGGER_CONTROL, "Still Image Trigger" },
{ VS_STREAM_ERROR_CODE_CONTROL, "Stream Error Code" },
{ VS_GENERATE_KEY_FRAME_CONTROL, "Generate Key Frame" },
{ VS_UPDATE_FRAME_SEGMENT_CONTROL, "Update Frame Segment" },
{ VS_SYNCH_DELAY_CONTROL, "Synch Delay" },
{ 0, NULL }
};
static value_string_ext cs_streaming_interface_ext =
VALUE_STRING_EXT_INIT(cs_streaming_interface);
static const value_string cs_selector_unit[] = {
{ SU_CONTROL_UNDEFINED, "Undefined" },
{ SU_INPUT_SELECT_CONTROL, "Input Select" },
{ 0, NULL }
};
static value_string_ext cs_selector_unit_ext =
VALUE_STRING_EXT_INIT(cs_selector_unit);
static const value_string cs_camera_terminal[] = {
{ CT_CONTROL_UNDEFINED, "Undefined" },
{ CT_SCANNING_MODE_CONTROL, "Scanning Mode" },
{ CT_AE_MODE_CONTROL, "Auto-Exposure Mode" },
{ CT_AE_PRIORITY_CONTROL, "Auto-Exposure Priority" },
{ CT_EXPOSURE_TIME_ABSOLUTE_CONTROL, "Exposure Time (Absolute)" },
{ CT_EXPOSURE_TIME_RELATIVE_CONTROL, "Exposure Time (Relative)" },
{ CT_FOCUS_ABSOLUTE_CONTROL, "Focus (Absolute)" },
{ CT_FOCUS_RELATIVE_CONTROL, "Focus (Relative)" },
{ CT_FOCUS_AUTO_CONTROL, "Focus, Auto" },
{ CT_IRIS_ABSOLUTE_CONTROL, "Iris (Absolute)" },
{ CT_IRIS_RELATIVE_CONTROL, "Iris (Relative)" },
{ CT_ZOOM_ABSOLUTE_CONTROL, "Zoom (Absolute)" },
{ CT_ZOOM_RELATIVE_CONTROL, "Zoom (Relative)" },
{ CT_PANTILT_ABSOLUTE_CONTROL, "PanTilt (Absolute)" },
{ CT_PANTILT_RELATIVE_CONTROL, "PanTilt (Relative)" },
{ CT_ROLL_ABSOLUTE_CONTROL, "Roll (Absolute)" },
{ CT_ROLL_RELATIVE_CONTROL, "Roll (Relative)" },
{ CT_PRIVACY_CONTROL, "Privacy" },
{ CT_FOCUS_SIMPLE_CONTROL, "Focus (Simple)" },
{ CT_WINDOW_CONTROL, "Window" },
{ CT_REGION_OF_INTEREST_CONTROL, "Region of Interest" },
{ 0, NULL }
};
static value_string_ext cs_camera_terminal_ext =
VALUE_STRING_EXT_INIT(cs_camera_terminal);
static const value_string cs_processing_unit[] = {
{ PU_CONTROL_UNDEFINED, "Undefined" },
{ PU_BACKLIGHT_COMPENSATION_CONTROL, "Backlight Compensation" },
{ PU_BRIGHTNESS_CONTROL, "Brightness" },
{ PU_CONTRAST_CONTROL, "Contrast" },
{ PU_GAIN_CONTROL, "Gain" },
{ PU_POWER_LINE_FREQUENCY_CONTROL, "Power Line Frequency" },
{ PU_HUE_CONTROL, "Hue" },
{ PU_SATURATION_CONTROL, "Saturation" },
{ PU_SHARPNESS_CONTROL, "Sharpness" },
{ PU_GAMMA_CONTROL, "Gamma" },
{ PU_WHITE_BALANCE_TEMPERATURE_CONTROL, "White Balance Temperature" },
{ PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL,"White Balance Temperature Auto" },
{ PU_WHITE_BALANCE_COMPONENT_CONTROL, "White Balance Component" },
{ PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL, "White Balance Component Auto" },
{ PU_DIGITAL_MULTIPLIER_CONTROL, "Digital Multiplier" },
{ PU_DIGITAL_MULTIPLIER_LIMIT_CONTROL, "Digital Multiplier Limit" },
{ PU_HUE_AUTO_CONTROL, "Hue Auto" },
{ PU_ANALOG_VIDEO_STANDARD_CONTROL, "Video Standard" },
{ PU_ANALOG_LOCK_STATUS_CONTROL, "Analog Lock Status" },
{ PU_CONTRAST_AUTO_CONTROL, "Contrast Auto" },
{ 0, NULL }
};
static value_string_ext cs_processing_unit_ext =
VALUE_STRING_EXT_INIT(cs_processing_unit);
static const value_string vc_terminal_types[] = {
{ TT_VENDOR_SPECIFIC, "Vendor Specific", },
{ TT_STREAMING, "Streaming" },
{ ITT_VENDOR_SPECIFIC, "Vendor Specific Input" },
{ ITT_CAMERA, "Camera Input" },
{ ITT_MEDIA_TRANSPORT_INPUT, "Media Transport Input" },
{ OTT_VENDOR_SPECIFIC, "Vendor Specific Output" },
{ OTT_DISPLAY, "Display Output" },
{ OTT_MEDIA_TRANSPORT_OUTPUT, "Media Transport Output" },
{ EXTERNAL_VENDOR_SPECIFIC, "Vendor Specific External" },
{ COMPOSITE_CONNECTOR, "Composite Connector" },
{ SVIDEO_CONNECTOR, "SVideo Connector" },
{ COMPONENT_CONNECTOR, "Component Connector" },
{ 0, NULL }
};
static value_string_ext vc_terminal_types_ext =
VALUE_STRING_EXT_INIT(vc_terminal_types);
static const value_string vs_if_descriptor_subtypes[] = {
{ VS_UNDEFINED, "Undefined" },
{ VS_INPUT_HEADER, "Input Header" },
{ VS_OUTPUT_HEADER, "Output Header" },
{ VS_STILL_IMAGE_FRAME, "Still Image Frame" },
{ VS_FORMAT_UNCOMPRESSED, "Format Uncompressed" },
{ VS_FRAME_UNCOMPRESSED, "Frame Uncompressed" },
{ VS_FORMAT_MJPEG, "Format MJPEG" },
{ VS_FRAME_MJPEG, "Frame MJPEG" },
{ VS_FORMAT_MPEG1, "Format MPEG1" },
{ VS_FORMAT_MPEG2PS, "Format MPEG2-PS" },
{ VS_FORMAT_MPEG2TS, "Format MPEG2-TS" },
{ VS_FORMAT_MPEG4SL, "Format MPEG4-SL" },
{ VS_FORMAT_DV, "Format DV" },
{ VS_COLORFORMAT, "Colorformat" },
{ VS_FORMAT_VENDOR, "Format Vendor" },
{ VS_FRAME_VENDOR, "Frame Vendor" },
{ VS_FORMAT_FRAME_BASED, "Format Frame-Based" },
{ VS_FRAME_FRAME_BASED, "Frame Frame-Based" },
{ VS_FORMAT_STREAM_BASED, "Format Stream Based" },
{ VS_FORMAT_H264, "Format H.264" },
{ VS_FRAME_H264, "Frame H.264" },
{ VS_FORMAT_H264_SIMULCAST, "Format H.264 Simulcast" },
{ VS_FORMAT_VP8, "Format VP8" },
{ VS_FRAME_VP8, "Frame VP8" },
{ VS_FORMAT_VP8_SIMULCAST, "Format VP8 Simulcast" },
{ 0, NULL }
};
static value_string_ext vs_if_descriptor_subtypes_ext =
VALUE_STRING_EXT_INIT(vs_if_descriptor_subtypes);
static const value_string interrupt_status_types[] = {
{ INT_VIDEOCONTROL, "VideoControl Interface" },
{ INT_VIDEOSTREAMING, "VideoStreaming Interface" },
{ 0, NULL }
};
static const value_string control_change_types[] = {
{ CONTROL_CHANGE_VALUE, "Value" },
{ CONTROL_CHANGE_INFO, "Info" },
{ CONTROL_CHANGE_FAILURE, "Failure" },
{ CONTROL_CHANGE_MIN, "Min" },
{ CONTROL_CHANGE_MAX, "Max" },
{ 0, NULL }
};
static value_string_ext control_change_types_ext =
VALUE_STRING_EXT_INIT(control_change_types);
static const value_string control_interrupt_events[] = {
{ CONTROL_INTERRUPT_EVENT_CONTROL_CHANGE, "Control Change" },
{ 0, NULL }
};
/* Table 3-13 VS Interface Input Header Descriptor - bStillCaptureMethod field */
static const value_string vs_still_capture_methods[] = {
{ 0, "None" },
{ 1, "Uninterrupted streaming" },
{ 2, "Suspended streaming" },
{ 3, "Dedicated pipe" },
{ 0, NULL }
};
static value_string_ext vs_still_capture_methods_ext =
VALUE_STRING_EXT_INIT(vs_still_capture_methods);
/* Table 3-13 VS Interface Input Header Descriptor - bTriggerUsage field */
static const value_string vs_trigger_usage[] = {
{ 0, "Initiate still image capture" },
{ 1, "General purpose button event" },
{ 0, NULL }
};
/* bmInterlaceFlags for format descriptors */
static const true_false_string is_interlaced_meaning = {
"Interlaced",
"Non-interlaced"
};
/* bmInterlaceFlags for format descriptors */
static const true_false_string interlaced_fields_meaning = {
"1 field",
"2 fields"
};
/* bmInterlaceFlags for format descriptors */
static const value_string field_pattern_meaning[] = {
{ 0, "Field 1 only" },
{ 1, "Field 2 only" },
{ 2, "Regular pattern of fields 1 and 2" },
{ 3, "Random pattern of fields 1 and 2" },
{0, NULL},
};
static value_string_ext field_pattern_meaning_ext =
VALUE_STRING_EXT_INIT(field_pattern_meaning);
/* bCopyProtect for format descriptors */
static const value_string copy_protect_meaning[] = {
{ 0, "No restrictions" },
{ 1, "Restrict duplication" },
{0, NULL},
};
/* Table 4-46 Video Probe and Commit Controls - bmHint field */
static const true_false_string probe_hint_meaning = {
"Constant",
"Variable"
};
/* Table 3-19 Color Matching Descriptor - bColorPrimaries field */
static const value_string color_primaries_meaning[] = {
{ 0, "Unspecified" },
{ 1, "BT.709, sRGB" },
{ 2, "BT.470-2 (M)" },
{ 3, "BT.470-2 (B,G)" },
{ 4, "SMPTE 170M" },
{ 5, "SMPTE 240M" },
{0, NULL},
};
static value_string_ext color_primaries_meaning_ext =
VALUE_STRING_EXT_INIT(color_primaries_meaning);
/* Table 3-19 Color Matching Descriptor - bTransferCharacteristics field */
static const value_string color_transfer_characteristics[] = {
{ 0, "Unspecified" },
{ 1, "BT.709" },
{ 2, "BT.470-2 (M)" },
{ 3, "BT.470-2 (B,G)" },
{ 4, "SMPTE 170M" },
{ 5, "SMPTE 240M" },
{ 6, "Linear (V=Lc)" },
{ 7, "sRGB" },
{0, NULL},
};
static value_string_ext color_transfer_characteristics_ext =
VALUE_STRING_EXT_INIT(color_transfer_characteristics);
/* Table 3-19 Color Matching Descriptor - bMatrixCoefficients field */
static const value_string matrix_coefficients_meaning[] = {
{ 0, "Unspecified" },
{ 1, "BT.709" },
{ 2, "FCC" },
{ 3, "BT.470-2 (B,G)" },
{ 4, "SMPTE 170M (BT.601)" },
{ 5, "SMPTE 240M" },
{0, NULL},
};
static value_string_ext matrix_coefficients_meaning_ext =
VALUE_STRING_EXT_INIT(matrix_coefficients_meaning);
static const value_string request_error_codes[] = {
{ UVC_ERROR_NONE, "No error" },
{ UVC_ERROR_NOT_READY, "Not ready" },
{ UVC_ERROR_WRONG_STATE, "Wrong state" },
{ UVC_ERROR_POWER, "Insufficient power" } ,
{ UVC_ERROR_OUT_OF_RANGE, "Out of range" },
{ UVC_ERROR_INVALID_UNIT, "Invalid unit" },
{ UVC_ERROR_INVALID_CONTROL, "Invalid control" },
{ UVC_ERROR_INVALID_REQUEST, "Invalid request" },
{ UVC_ERROR_INVALID_VALUE, "Invalid value within range" },
{ UVC_ERROR_UNKNOWN, "Unknown" },
{0, NULL},
};
static value_string_ext request_error_codes_ext =
VALUE_STRING_EXT_INIT(request_error_codes);
/* There is one such structure per terminal or unit per interface */
typedef struct
{
    guint8 entityID;      /* bTerminalID / bUnitID from the control interface descriptor */
    guint8 subtype;       /* VC_INPUT_TERMINAL, VC_OUTPUT_TERMINAL, VC_*_UNIT, ... */
    guint16 terminalType; /* wTerminalType for terminals; left 0 for units */
} video_entity_t;
/* video_entity_t's (units/terminals) associated with each video interface */
/* There is one such structure for each video conversation (interface) */
typedef struct _video_conv_info_t {
    wmem_tree_t* entities; /* indexed by entity ID */
} video_conv_info_t;
/*****************************************************************************/
/* UTILITY FUNCTIONS */
/*****************************************************************************/
/**
* Dissector for variable-length bmControl bitmask / bControlSize pair.
*
* Creates an item for bControlSize, and a subtree for the bmControl bitmask.
*
* @param tree protocol tree to be the parent of the bitmask subtree
* @param tvb the tv_buff with the (remaining) packet data
* @param offset where in tvb to find bControlSize field
* @param ett_subtree index of the subtree to use for this bitmask
* @param bm_items NULL-terminated array of pointers that lists all the fields
* of the bitmask
*
* @return offset within tvb at which dissection should continue
*/
static int
dissect_bmControl(proto_tree *tree, tvbuff_t *tvb, int offset,
                  gint ett_subtree, const int** bm_items)
{
    /* bControlSize tells us how many bytes of bmControl bitmask follow. */
    const guint8 mask_len = tvb_get_guint8(tvb, offset);

    proto_tree_add_item(tree, hf_usb_vid_bControlSize, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    offset++;

    /* A zero-length bitmask is legal; there is simply nothing to decode. */
    if (mask_len == 0)
        return offset;

    /* proto_tree_add_bitmask_len() flags (via ei_usb_vid_bitmask_len) any
     * mismatch between the wire length and the registered field width. */
    proto_tree_add_bitmask_len(tree, tvb, offset, mask_len, hf_usb_vid_bmControl,
                               ett_subtree, bm_items, &ei_usb_vid_bitmask_len, ENC_LITTLE_ENDIAN);
    return offset + mask_len;
}
/*****************************************************************************/
/* VIDEO CONTROL DESCRIPTORS */
/*****************************************************************************/
/* Dissect a Camera Terminal descriptor */
/* Dissect a Camera Terminal descriptor (UVC Table 3-6).
 * On entry, offset points just past the bTerminalID/wTerminalType header. */
static int
dissect_usb_video_camera_terminal(proto_tree *tree, tvbuff_t *tvb, int offset)
{
    /* One entry per defined bit of the camera terminal's bmControls
     * bitmask, terminated by NULL for proto_tree_add_bitmask_len(). */
    static const int *cam_ctrl_fields[] = {
        &hf_usb_vid_cam_control_D[0],
        &hf_usb_vid_cam_control_D[1],
        &hf_usb_vid_cam_control_D[2],
        &hf_usb_vid_cam_control_D[3],
        &hf_usb_vid_cam_control_D[4],
        &hf_usb_vid_cam_control_D[5],
        &hf_usb_vid_cam_control_D[6],
        &hf_usb_vid_cam_control_D[7],
        &hf_usb_vid_cam_control_D[8],
        &hf_usb_vid_cam_control_D[9],
        &hf_usb_vid_cam_control_D[10],
        &hf_usb_vid_cam_control_D[11],
        &hf_usb_vid_cam_control_D[12],
        &hf_usb_vid_cam_control_D[13],
        &hf_usb_vid_cam_control_D[14],
        &hf_usb_vid_cam_control_D[15],
        &hf_usb_vid_cam_control_D[16],
        &hf_usb_vid_cam_control_D[17],
        &hf_usb_vid_cam_control_D[18],
        &hf_usb_vid_cam_control_D[19],
        &hf_usb_vid_cam_control_D[20],
        &hf_usb_vid_cam_control_D[21],
        NULL
    };

    /* Guard against the field table and the hf array drifting apart. */
    DISSECTOR_ASSERT(array_length(cam_ctrl_fields) == (1+array_length(hf_usb_vid_cam_control_D)));

    /* Three 16-bit focal length fields precede the controls bitmask. */
    proto_tree_add_item(tree, hf_usb_vid_cam_objective_focal_len_min, tvb, offset,   2, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_cam_objective_focal_len_max, tvb, offset+2, 2, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_cam_ocular_focal_len,        tvb, offset+4, 2, ENC_LITTLE_ENDIAN);
    offset += 6;

    return dissect_bmControl(tree, tvb, offset, ett_camera_controls, cam_ctrl_fields);
}
/* Dissect a Processing Unit descriptor */
/* Dissect a Processing Unit descriptor (UVC Table 3-8). */
static int
dissect_usb_video_processing_unit(proto_tree *tree, tvbuff_t *tvb, int offset)
{
    /* NULL-terminated field tables for the bmControls and (UVC 1.1+)
     * bmVideoStandards bitmasks. */
    static const int *ctrl_fields[] = {
        &hf_usb_vid_proc_control_D[0],
        &hf_usb_vid_proc_control_D[1],
        &hf_usb_vid_proc_control_D[2],
        &hf_usb_vid_proc_control_D[3],
        &hf_usb_vid_proc_control_D[4],
        &hf_usb_vid_proc_control_D[5],
        &hf_usb_vid_proc_control_D[6],
        &hf_usb_vid_proc_control_D[7],
        &hf_usb_vid_proc_control_D[8],
        &hf_usb_vid_proc_control_D[9],
        &hf_usb_vid_proc_control_D[10],
        &hf_usb_vid_proc_control_D[11],
        &hf_usb_vid_proc_control_D[12],
        &hf_usb_vid_proc_control_D[13],
        &hf_usb_vid_proc_control_D[14],
        &hf_usb_vid_proc_control_D[15],
        &hf_usb_vid_proc_control_D[16],
        &hf_usb_vid_proc_control_D[17],
        &hf_usb_vid_proc_control_D[18],
        NULL
    };
    static const int *std_fields[] = {
        &hf_usb_vid_proc_standards_D[0],
        &hf_usb_vid_proc_standards_D[1],
        &hf_usb_vid_proc_standards_D[2],
        &hf_usb_vid_proc_standards_D[3],
        &hf_usb_vid_proc_standards_D[4],
        &hf_usb_vid_proc_standards_D[5],
        NULL
    };

    /* Keep the field tables in lockstep with the hf arrays. */
    DISSECTOR_ASSERT(array_length(ctrl_fields) == (1+array_length(hf_usb_vid_proc_control_D)));
    DISSECTOR_ASSERT(array_length(std_fields)  == (1+array_length(hf_usb_vid_proc_standards_D)));

    /* bSourceID followed by 16-bit wMaxMultiplier */
    proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_src_id, tvb, offset,   1, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_max_multiplier,        tvb, offset+1, 2, ENC_LITTLE_ENDIAN);
    offset += 3;

    /* bControlSize + bmControls bitmask */
    offset = dissect_bmControl(tree, tvb, offset, ett_processing_controls, ctrl_fields);

    /* iProcessing string index */
    proto_tree_add_item(tree, hf_usb_vid_iProcessing, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    offset++;

    /* bmVideoStandards was introduced in UVC 1.1; dissect it only when
     * the descriptor actually carries the extra byte. */
    if (tvb_reported_length_remaining(tvb, offset) > 0)
    {
        proto_tree_add_bitmask(tree, tvb, offset, hf_usb_vid_proc_standards,
                               ett_video_standards, std_fields, ENC_NA);
        offset++;
    }

    return offset;
}
/* Dissect a Selector Unit descriptor */
/* Dissect a Selector Unit descriptor (UVC Table 3-7). */
static int
dissect_usb_video_selector_unit(proto_tree *tree, tvbuff_t *tvb, int offset)
{
    /* bNrInPins: number of entries in the baSourceID array that follows. */
    const guint8 pin_count = tvb_get_guint8(tvb, offset);

    proto_tree_add_item(tree, hf_usb_vid_num_inputs, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    offset++;

    if (pin_count != 0)
    {
        proto_tree_add_item(tree, hf_usb_vid_sources, tvb, offset, pin_count, ENC_NA);
        offset += pin_count;
    }

    /* iSelector: string descriptor index naming this unit */
    proto_tree_add_item(tree, hf_usb_vid_iSelector, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    return offset + 1;
}
/* Dissect an Extension Unit descriptor */
/* Dissect an Extension Unit descriptor (UVC Table 3-9). */
static int
dissect_usb_video_extension_unit(proto_tree *tree, tvbuff_t *tvb, int offset)
{
    guint8 pin_count;
    guint8 mask_len;

    /* 16-byte guidExtensionCode followed by bNumControls */
    proto_tree_add_item(tree, hf_usb_vid_exten_guid,         tvb, offset,    16, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_exten_num_controls, tvb, offset+16,  1, ENC_LITTLE_ENDIAN);
    offset += 17;

    /* bNrInPins and the baSourceID array it sizes */
    pin_count = tvb_get_guint8(tvb, offset);
    proto_tree_add_item(tree, hf_usb_vid_num_inputs, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    offset++;
    if (pin_count != 0)
    {
        proto_tree_add_item(tree, hf_usb_vid_sources, tvb, offset, pin_count, ENC_NA);
        offset += pin_count;
    }

    /* bControlSize + bmControls. We cannot reuse dissect_bmControl() here
     * because the vendor-defined bits have no registered per-bit fields. */
    mask_len = tvb_get_guint8(tvb, offset);
    proto_tree_add_item(tree, hf_usb_vid_bControlSize, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    offset++;
    if (mask_len != 0)
    {
        if (mask_len <= proto_registrar_get_length(hf_usb_vid_bmControl))
        {
            /* Fits in the registered integer field */
            proto_tree_add_item(tree, hf_usb_vid_bmControl, tvb, offset, mask_len,
                                ENC_LITTLE_ENDIAN);
        }
        else
        {
            /* Wider than the integer field allows, so fall back to raw bytes.
             * @todo Display as FT_BYTES with a big-endian disclaimer?
             * See https://bugs.wireshark.org/bugzilla/show_bug.cgi?id=7933
             */
            proto_tree_add_bytes_format(tree, hf_usb_vid_bmControl_bytes, tvb, offset, mask_len, NULL, "bmControl");
        }
        offset += mask_len;
    }

    /* iExtension: string descriptor index naming this unit */
    proto_tree_add_item(tree, hf_usb_vid_iExtension, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    return offset + 1;
}
/**
 * Dissector for video class control interface descriptors
 *
 * @param parent_tree the protocol tree to be the parent of the descriptor subtree
 * @param tvb the tv_buff with the (remaining) packet data;
 * on entry, offset 0 is the descriptor's bLength field
 * @param descriptor_len Length of the descriptor to dissect
 * @param pinfo Information associated with the packet being dissected
 * @param usb_conv_info Conversation data for the USB interface being dissected
 *
 * @return offset within tvb at which dissection should continue
 */
static int
dissect_usb_video_control_interface_descriptor(proto_tree *parent_tree, tvbuff_t *tvb,
        guint8 descriptor_len, packet_info *pinfo, usb_conv_info_t *usb_conv_info)
{
    video_conv_info_t *video_conv_info = NULL;
    video_entity_t *entity = NULL;
    proto_item *item = NULL;
    proto_item *subtype_item = NULL;
    proto_tree *tree = NULL;
    guint8 entity_id = 0;   /* stays 0 for VC_HEADER (which has no entity ID) */
    guint16 terminal_type = 0;  /* only set for input/output terminals */
    int offset = 0;
    guint8 subtype;
    /* bDescriptorSubtype is the third byte of every VC interface descriptor */
    subtype = tvb_get_guint8(tvb, offset+2);
    if (parent_tree)
    {
        const gchar *subtype_str;
        subtype_str = val_to_str_ext(subtype, &vc_if_descriptor_subtypes_ext, "Unknown (0x%x)");
        tree = proto_tree_add_subtree_format(parent_tree, tvb, offset, descriptor_len,
                ett_descriptor_video_control, &item, "VIDEO CONTROL INTERFACE DESCRIPTOR [%s]",
                subtype_str);
    }
    /* Common fields */
    /* bLength / bDescriptorType, then bDescriptorSubtype */
    dissect_usb_descriptor_header(tree, tvb, offset, &vid_descriptor_type_vals_ext);
    subtype_item = proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_subtype, tvb, offset+2, 1, ENC_LITTLE_ENDIAN);
    offset += 3;
    if (subtype == VC_HEADER)
    {
        /* Class-specific VC interface header: bcdUVC, wTotalLength,
         * dwClockFrequency, bInCollection and the baInterfaceNr array */
        guint8 num_vs_interfaces;
        proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_bcdUVC, tvb, offset, 2, ENC_LITTLE_ENDIAN);
        proto_tree_add_item(tree, hf_usb_vid_ifdesc_wTotalLength, tvb, offset+2, 2, ENC_LITTLE_ENDIAN);
        proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_dwClockFrequency, tvb, offset+4, 4, ENC_LITTLE_ENDIAN);
        num_vs_interfaces = tvb_get_guint8(tvb, offset+8);
        proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_bInCollection, tvb, offset+8, 1, ENC_LITTLE_ENDIAN);
        if (num_vs_interfaces > 0)
        {
            proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_baInterfaceNr, tvb, offset+9, num_vs_interfaces, ENC_NA);
        }
        offset += 9 + num_vs_interfaces;
    }
    else if ((subtype == VC_INPUT_TERMINAL) || (subtype == VC_OUTPUT_TERMINAL))
    {
        /* Fields common to input and output terminals */
        entity_id = tvb_get_guint8(tvb, offset);
        terminal_type = tvb_get_letohs(tvb, offset+1);
        proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_terminal_id, tvb, offset, 1, ENC_LITTLE_ENDIAN);
        proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_terminal_type, tvb, offset+1, 2, ENC_LITTLE_ENDIAN);
        proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_assoc_terminal, tvb, offset+3, 1, ENC_LITTLE_ENDIAN);
        offset += 4;
        if (subtype == VC_OUTPUT_TERMINAL)
        {
            /* Output terminals additionally carry bSourceID before iTerminal */
            proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_src_id, tvb, offset, 1, ENC_LITTLE_ENDIAN);
            ++offset;
        }
        proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_iTerminal, tvb, offset, 1, ENC_LITTLE_ENDIAN);
        ++offset;
        if (subtype == VC_INPUT_TERMINAL)
        {
            if (terminal_type == ITT_CAMERA)
            {
                /* Camera terminals have type-specific trailing fields */
                offset = dissect_usb_video_camera_terminal(tree, tvb, offset);
            }
            else if (terminal_type == ITT_MEDIA_TRANSPORT_INPUT)
            {
                /* @todo */
            }
        }
        if (subtype == VC_OUTPUT_TERMINAL)
        {
            if (terminal_type == OTT_MEDIA_TRANSPORT_OUTPUT)
            {
                /* @todo */
            }
        }
    }
    else
    {
        /* Field common to extension / processing / selector / encoding units */
        entity_id = tvb_get_guint8(tvb, offset);
        proto_tree_add_item(tree, hf_usb_vid_control_ifdesc_unit_id, tvb, offset, 1, ENC_LITTLE_ENDIAN);
        ++offset;
        if (subtype == VC_PROCESSING_UNIT)
        {
            offset = dissect_usb_video_processing_unit(tree, tvb, offset);
        }
        else if (subtype == VC_SELECTOR_UNIT)
        {
            offset = dissect_usb_video_selector_unit(tree, tvb, offset);
        }
        else if (subtype == VC_EXTENSION_UNIT)
        {
            offset = dissect_usb_video_extension_unit(tree, tvb, offset);
        }
        else if (subtype == VC_ENCODING_UNIT)
        {
            /* @todo UVC 1.5 */
        }
        else
        {
            expert_add_info_format(pinfo, subtype_item, &ei_usb_vid_subtype_unknown,
                                   "Unknown VC subtype %u", subtype);
        }
    }
    /* Soak up descriptor bytes beyond those we know how to dissect */
    if (offset < descriptor_len)
    {
        proto_tree_add_item(tree, hf_usb_vid_descriptor_data, tvb, offset, descriptor_len-offset, ENC_NA);
        /* offset = descriptor_len; */
    }
    if (entity_id != 0)
        proto_item_append_text(item, " (Entity %d)", entity_id);
    /* Record this terminal/unit in the per-interface entity tree so later
     * control requests can be dissected in terms of the entity they target. */
    if (subtype != VC_HEADER && usb_conv_info)
    {
        /* Switch to the usb_conv_info of the Video Control interface */
        usb_conv_info = get_usb_iface_conv_info(pinfo, usb_conv_info->interfaceNum);
        video_conv_info = (video_conv_info_t *)usb_conv_info->class_data;
        if (!video_conv_info)
        {
            /* First entity on this interface: set up file-scoped storage */
            video_conv_info = wmem_new(wmem_file_scope(), video_conv_info_t);
            video_conv_info->entities = wmem_tree_new(wmem_file_scope());
            usb_conv_info->class_data = video_conv_info;
            usb_conv_info->class_data_type = USB_CONV_VIDEO;
        } else if (usb_conv_info->class_data_type != USB_CONV_VIDEO) {
            /* Stop dissection if another USB type is in the conversation */
            return descriptor_len;
        }
        entity = (video_entity_t*) wmem_tree_lookup32(video_conv_info->entities, entity_id);
        if (!entity)
        {
            /* Not seen before (first pass): remember id/subtype/terminal type */
            entity = wmem_new(wmem_file_scope(), video_entity_t);
            entity->entityID = entity_id;
            entity->subtype = subtype;
            entity->terminalType = terminal_type;
            wmem_tree_insert32(video_conv_info->entities, entity_id, entity);
        }
    }
    return descriptor_len;
}
/*****************************************************************************/
/* VIDEO STREAMING DESCRIPTORS */
/*****************************************************************************/
/* Dissect a Video Streaming Input Header descriptor.
 *
 * @param tree   protocol tree to which fields should be added
 * @param tvb    the tv_buff with the (remaining) packet data
 * @param offset where in tvb to begin dissection.
 *               On entry this refers to the bNumFormats field.
 *
 * @return offset within tvb at which dissection should continue
 */
static int
dissect_usb_video_streaming_input_header(proto_tree *tree, tvbuff_t *tvb, int offset)
{
    guint8 num_formats;   /* bNumFormats: one bmaControls entry per format */
    guint8 bm_size;       /* bControlSize: width in bytes of each bmaControls entry */

    /* Flag bits in bmInfo */
    static const int *info_bits[] = {
        &hf_usb_vid_streaming_info_D[0],
        NULL
    };
    /* Flag bits in each per-format bmaControls entry */
    static const int *control_bits[] = {
        &hf_usb_vid_streaming_control_D[0],
        &hf_usb_vid_streaming_control_D[1],
        &hf_usb_vid_streaming_control_D[2],
        &hf_usb_vid_streaming_control_D[3],
        &hf_usb_vid_streaming_control_D[4],
        &hf_usb_vid_streaming_control_D[5],
        NULL
    };

    /* Sanity check: bitmask table must cover every registered field plus the NULL sentinel */
    DISSECTOR_ASSERT(array_length(control_bits) == (1+array_length(hf_usb_vid_streaming_control_D)));

    num_formats = tvb_get_guint8(tvb, offset);
    proto_tree_add_item(tree, hf_usb_vid_streaming_ifdesc_bNumFormats, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_ifdesc_wTotalLength, tvb, offset+1, 2, ENC_LITTLE_ENDIAN);
    offset += 3;

    dissect_usb_endpoint_address(tree, tvb, offset);
    offset++;

    proto_tree_add_bitmask(tree, tvb, offset, hf_usb_vid_streaming_bmInfo,
                           ett_streaming_info, info_bits, ENC_NA);
    proto_tree_add_item(tree, hf_usb_vid_streaming_terminal_link, tvb, offset+1, 1, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_streaming_still_capture_method, tvb, offset+2, 1, ENC_LITTLE_ENDIAN);
    offset += 3;

    proto_tree_add_item(tree, hf_usb_vid_streaming_trigger_support, tvb, offset, 1, ENC_NA);
    /* bTriggerUsage only applies when hardware triggering is supported */
    if (tvb_get_guint8(tvb, offset) > 0)
    {
        proto_tree_add_item(tree, hf_usb_vid_streaming_trigger_usage, tvb, offset+1, 1, ENC_LITTLE_ENDIAN);
    }
    else
    {
        proto_tree_add_uint_format_value(tree, hf_usb_vid_streaming_trigger_usage, tvb, offset+1, 1, 0, "Not applicable");
    }
    offset += 2;

    /* NOTE: Can't use dissect_bmControl here because there's only one size
     * field for (potentially) multiple bmControl fields
     */
    bm_size = tvb_get_guint8(tvb, offset);
    proto_tree_add_item(tree, hf_usb_vid_bControlSize, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    ++offset;

    if (bm_size > 0)
    {
        guint8 i;
        /* One bmaControls bitmap per format that follows this header */
        for (i=0; i<num_formats; ++i)
        {
            proto_tree_add_bitmask_len(tree, tvb, offset, bm_size, hf_usb_vid_bmControl,
                                       ett_streaming_controls, control_bits, &ei_usb_vid_bitmask_len,
                                       ENC_LITTLE_ENDIAN);
            offset += bm_size;
        }
    }
    return offset;
}
/**
* Dissect a known Video Payload Format descriptor.
*
* @param tree protocol tree to which fields should be added
* @param tvb the tv_buff with the (remaining) packet data
* @param offset where in tvb to begin dissection.
* On entry this refers to the bFormatIndex field.
* @param subtype Type of format descriptor, from the
* bDescriptorSubtype field
*
* @return offset within tvb at which dissection should continue
*/
static int
dissect_usb_video_format(proto_tree *tree, tvbuff_t *tvb, int offset,
                         guint8 subtype)
{
    /* Flag bits in bmInterlaceFlags */
    static const int *interlace_bits[] = {
        &hf_usb_vid_is_interlaced,
        &hf_usb_vid_interlaced_fields,
        &hf_usb_vid_field_1_first,
        &hf_usb_vid_field_pattern,
        NULL
    };

    proto_item *desc_item;
    guint8 format_index;

    /* Augment the descriptor root item with the index of this descriptor */
    format_index = tvb_get_guint8(tvb, offset);
    desc_item = proto_tree_get_parent(tree);
    proto_item_append_text(desc_item, " (Format %u)", format_index);

    proto_tree_add_item(tree, hf_usb_vid_format_index, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_format_num_frame_descriptors, tvb, offset+1, 1, ENC_LITTLE_ENDIAN);
    offset += 2;

    if ((subtype == VS_FORMAT_UNCOMPRESSED) || (subtype == VS_FORMAT_FRAME_BASED))
    {
        /* Augment the descriptor root item with the format's four-character-code,
         * taken from the first four bytes of the 16-byte GUID field.
         */
        char fourcc[5];
        tvb_memcpy(tvb, (guint8 *)fourcc, offset, 4);
        fourcc[4] = '\0';
        proto_item_append_text(desc_item, ": %s", fourcc);

        proto_tree_add_item(tree, hf_usb_vid_format_guid, tvb, offset, 16, ENC_LITTLE_ENDIAN);
        proto_tree_add_item(tree, hf_usb_vid_format_bits_per_pixel, tvb, offset+16, 1, ENC_LITTLE_ENDIAN);
        offset += 17;
    }
    else if (subtype == VS_FORMAT_MJPEG)
    {
        /* Flag bits in the MJPEG bmFlags field */
        static const int * flags[] = {
            &hf_usb_vid_mjpeg_fixed_samples,
            NULL
        };
        proto_tree_add_bitmask(tree, tvb, offset, hf_usb_vid_mjpeg_flags, ett_mjpeg_flags, flags, ENC_NA);
        offset++;
    }
    else
    {
        /* We should only be called for known format descriptor subtypes */
        DISSECTOR_ASSERT_NOT_REACHED();
    }

    proto_tree_add_item(tree, hf_usb_vid_default_frame_index, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_aspect_ratio_x, tvb, offset+1, 1, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_aspect_ratio_y, tvb, offset+2, 1, ENC_LITTLE_ENDIAN);
    offset += 3;

#if 0
    /* @todo Display "N/A" if Camera Terminal does not support scanning mode control */
    if (something)
        proto_tree_add_uint_format_value(tree, hf_usb_vid_interlace_flags, tvb, offset, 1, tvb_get_guint8(tvb, offset), "Not applicable");
#endif

    proto_tree_add_bitmask(tree, tvb, offset, hf_usb_vid_interlace_flags,
                           ett_interlace_flags, interlace_bits, ENC_NA);
    offset++;

    proto_tree_add_item(tree, hf_usb_vid_copy_protect, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    offset++;

    /* Frame-based formats carry one extra byte (variable-size flag) */
    if (subtype == VS_FORMAT_FRAME_BASED)
    {
        proto_tree_add_item(tree, hf_usb_vid_variable_size, tvb, offset, 1, ENC_NA);
        offset++;
    }

    return offset;
}
/**
* Dissect a known Video Frame descriptor.
*
* @param tree protocol tree to which fields should be added
* @param tvb the tv_buff with the (remaining) packet data
* @param offset where in tvb to begin dissection.
* On entry this refers to the bFrameIndex field.
* @param subtype Type of frame descriptor, from the
* bDescriptorSubtype field
*
* @return offset within tvb at which dissection should continue
*/
static int
dissect_usb_video_frame(proto_tree *tree, tvbuff_t *tvb, int offset,
                        guint8 subtype)
{
    /* Flag bits in bmCapabilities */
    static const int *capability_bits[] = {
        &hf_usb_vid_frame_stills_supported,
        &hf_usb_vid_frame_fixed_frame_rate,
        NULL
    };
    proto_item *desc_item;
    guint8 bFrameIntervalType;  /* 0 = continuous range, N = count of discrete intervals */
    guint8 frame_index;
    guint16 frame_width;
    guint16 frame_height;

    frame_index = tvb_get_guint8(tvb, offset);
    proto_tree_add_item(tree, hf_usb_vid_frame_index, tvb, offset, 1, ENC_LITTLE_ENDIAN);
    offset++;

    proto_tree_add_bitmask(tree, tvb, offset, hf_usb_vid_frame_capabilities,
                           ett_frame_capability_flags, capability_bits, ENC_NA);
    offset++;

    proto_tree_add_item(tree, hf_usb_vid_frame_width, tvb, offset, 2, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_frame_height, tvb, offset+2, 2, ENC_LITTLE_ENDIAN);

    /* Augment the descriptor root item with useful information */
    frame_width = tvb_get_letohs(tvb, offset);
    frame_height = tvb_get_letohs(tvb, offset+2);
    desc_item = proto_tree_get_parent(tree);
    proto_item_append_text(desc_item, " (Index %2u): %4u x %4u", frame_index, frame_width, frame_height);

    proto_tree_add_item(tree, hf_usb_vid_frame_min_bit_rate, tvb, offset+4, 4, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_frame_max_bit_rate, tvb, offset+8, 4, ENC_LITTLE_ENDIAN);
    offset += 12;

    /* The max-frame-size field is absent in frame-based frame descriptors */
    if (subtype != VS_FRAME_FRAME_BASED)
    {
        proto_tree_add_item(tree, hf_usb_vid_frame_max_frame_sz, tvb, offset, 4, ENC_LITTLE_ENDIAN);
        offset += 4;
    }

    proto_tree_add_item(tree, hf_usb_vid_frame_default_interval, tvb, offset, 4, ENC_LITTLE_ENDIAN);
    offset += 4;

    bFrameIntervalType = tvb_get_guint8(tvb, offset);
    if (bFrameIntervalType == 0)
    {
        /* Continuous range: min/max/step triple follows */
        proto_tree_add_uint_format_value(tree, hf_usb_vid_frame_interval_type, tvb, offset, 1,
                                         bFrameIntervalType, "Continuous (0)");
        offset++;

        if (subtype == VS_FRAME_FRAME_BASED)
        {
            proto_tree_add_item(tree, hf_usb_vid_frame_bytes_per_line, tvb, offset, 4, ENC_LITTLE_ENDIAN);
            offset += 4;
        }

        proto_tree_add_item(tree, hf_usb_vid_frame_min_interval, tvb, offset, 4, ENC_LITTLE_ENDIAN);
        proto_tree_add_item(tree, hf_usb_vid_frame_max_interval, tvb, offset+4, 4, ENC_LITTLE_ENDIAN);
        proto_tree_add_item(tree, hf_usb_vid_frame_step_interval, tvb, offset+8, 4, ENC_LITTLE_ENDIAN);
        offset += 12;
    }
    else
    {
        /* Discrete choices: bFrameIntervalType 4-byte intervals follow */
        guint8 i;
        proto_tree_add_uint_format_value(tree, hf_usb_vid_frame_interval_type, tvb, offset, 1,
                                         bFrameIntervalType, "Discrete (%u choice%s)",
                                         bFrameIntervalType, (bFrameIntervalType > 1) ? "s" : "");
        offset++;

        if (subtype == VS_FRAME_FRAME_BASED)
        {
            proto_tree_add_item(tree, hf_usb_vid_frame_bytes_per_line, tvb, offset, 4, ENC_LITTLE_ENDIAN);
            offset += 4;
        }

        for (i=0; i<bFrameIntervalType; ++i)
        {
            proto_tree_add_item(tree, hf_usb_vid_frame_interval, tvb, offset, 4, ENC_LITTLE_ENDIAN);
            offset += 4;
        }
    }

    return offset;
}
/* Dissect a Color Matching descriptor: three consecutive one-byte fields
 * (color primaries, transfer characteristics, matrix coefficients).
 *
 * @param tree   protocol tree to which fields should be added
 * @param tvb    the tv_buff with the (remaining) packet data
 * @param offset position of the first field on entry
 *
 * @return offset just past the last dissected byte
 */
static int
dissect_usb_video_colorformat(proto_tree *tree, tvbuff_t *tvb, int offset)
{
    const int color_fields[] = {
        hf_usb_vid_color_primaries,
        hf_usb_vid_transfer_characteristics,
        hf_usb_vid_matrix_coefficients
    };
    guint idx;

    for (idx = 0; idx < array_length(color_fields); idx++)
    {
        proto_tree_add_item(tree, color_fields[idx], tvb, offset, 1, ENC_LITTLE_ENDIAN);
        offset++;
    }

    return offset;
}
/**
* Dissector for video class streaming interface descriptors.
*
* @param parent_tree the protocol tree to be the parent of the descriptor subtree
* @param tvb the tv_buff with the (remaining) packet data
* On entry the gaze is set to the descriptor length field.
* @param descriptor_len Length of the descriptor to dissect
*
* @return offset within tvb at which dissection should continue
*/
static int
dissect_usb_video_streaming_interface_descriptor(proto_tree *parent_tree, tvbuff_t *tvb,
                                                 guint8 descriptor_len)
{
    proto_tree *tree;
    int offset = 0;
    const gchar *subtype_str;
    guint8 subtype;

    /* Peek at the descriptor subtype (3rd byte) to label the subtree */
    subtype = tvb_get_guint8(tvb, offset+2);

    subtype_str = val_to_str_ext(subtype, &vs_if_descriptor_subtypes_ext, "Unknown (0x%x)");
    tree = proto_tree_add_subtree_format(parent_tree, tvb, offset, descriptor_len,
            ett_descriptor_video_streaming, NULL, "VIDEO STREAMING INTERFACE DESCRIPTOR [%s]",
            subtype_str);

    dissect_usb_descriptor_header(tree, tvb, offset, &vid_descriptor_type_vals_ext);
    proto_tree_add_item(tree, hf_usb_vid_streaming_ifdesc_subtype, tvb, offset+2, 1, ENC_LITTLE_ENDIAN);
    offset += 3;

    /* Dispatch on the subtype; unknown subtypes fall through to the
     * raw-data soak-up below.
     */
    switch (subtype)
    {
        case VS_INPUT_HEADER:
            offset = dissect_usb_video_streaming_input_header(tree, tvb, offset);
            break;

        case VS_FORMAT_UNCOMPRESSED:
        case VS_FORMAT_MJPEG:
        case VS_FORMAT_FRAME_BASED:
            offset = dissect_usb_video_format(tree, tvb, offset, subtype);
            break;

        /* @todo MPEG2, H.264, VP8, Still Image Frame */
        /* @todo Obsolete UVC-1.0 descriptors? */

        case VS_FRAME_UNCOMPRESSED:
        case VS_FRAME_MJPEG:
        case VS_FRAME_FRAME_BASED:
            offset = dissect_usb_video_frame(tree, tvb, offset, subtype);
            break;

        case VS_COLORFORMAT:
            offset = dissect_usb_video_colorformat(tree, tvb, offset);
            break;

        default:
            break;
    }

    /* Soak up descriptor bytes beyond those we know how to dissect */
    if (offset < descriptor_len)
        proto_tree_add_item(tree, hf_usb_vid_descriptor_data, tvb, offset, descriptor_len-offset, ENC_NA);

    return descriptor_len;
}
/*****************************************************************************/
/**
* Dissector for video class-specific endpoint descriptor.
*
* @param parent_tree the protocol tree to be the parent of the descriptor subtree
* @param tvb the tv_buff with the (remaining) packet data
* On entry the gaze is set to the descriptor length field.
* @param descriptor_len Length of the descriptor to dissect
*
* @return offset within tvb at which dissection should continue
*/
static int
dissect_usb_video_endpoint_descriptor(proto_tree *parent_tree, tvbuff_t *tvb,
                                      guint8 descriptor_len)
{
    proto_tree *tree = NULL;
    int offset = 0;
    guint8 subtype;

    /* Peek at the descriptor subtype (3rd byte) to label the subtree */
    subtype = tvb_get_guint8(tvb, offset+2);

    if (parent_tree)
    {
        const gchar* subtype_str;

        subtype_str = val_to_str(subtype, vc_ep_descriptor_subtypes, "Unknown (0x%x)");
        tree = proto_tree_add_subtree_format(parent_tree, tvb, offset, descriptor_len,
                ett_descriptor_video_endpoint, NULL, "VIDEO CONTROL ENDPOINT DESCRIPTOR [%s]",
                subtype_str);
    }

    /* tree may be NULL here; proto_tree_add_* routines accept a NULL tree */
    dissect_usb_descriptor_header(tree, tvb, offset, &vid_descriptor_type_vals_ext);
    proto_tree_add_item(tree, hf_usb_vid_epdesc_subtype, tvb, offset+2, 1, ENC_LITTLE_ENDIAN);
    offset += 3;

    /* Only the interrupt endpoint subtype has a known extra field */
    if (subtype == EP_INTERRUPT)
    {
        proto_tree_add_item(tree, hf_usb_vid_epdesc_max_transfer_sz, tvb, offset, 2, ENC_LITTLE_ENDIAN);
        offset += 2;
    }

    /* Soak up descriptor bytes beyond those we know how to dissect */
    if (offset < descriptor_len)
        proto_tree_add_item(tree, hf_usb_vid_descriptor_data, tvb, offset, descriptor_len-offset, ENC_NA);

    return descriptor_len;
}
/**
* Registered dissector for video class-specific descriptors
*
* @param tvb the tv_buff with the (remaining) packet data
* On entry the gaze is set to the descriptor length field.
* @param pinfo the packet info of this packet (additional info)
* @param tree the protocol tree to be built or NULL
* @param data Not used
*
* @return 0 no class specific dissector was found
* @return <0 not enough data
* @return >0 amount of data in the descriptor
*/
static int
dissect_usb_vid_descriptor(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data)
{
    int offset = 0;
    guint8 descriptor_len;
    guint8 descriptor_type;
    gint bytes_available;
    usb_conv_info_t *usb_conv_info = (usb_conv_info_t *)data;
    tvbuff_t *desc_tvb;

    descriptor_len = tvb_get_guint8(tvb, offset);
    descriptor_type = tvb_get_guint8(tvb, offset+1);

    /* Bound the sub-dissectors to this single descriptor: the subset tvb is
     * capped at the captured bytes but reports descriptor_len as its length.
     */
    bytes_available = tvb_captured_length_remaining(tvb, offset);
    desc_tvb = tvb_new_subset(tvb, 0, bytes_available, descriptor_len);

    if (descriptor_type == CS_ENDPOINT)
    {
        offset = dissect_usb_video_endpoint_descriptor(tree, desc_tvb,
                                                       descriptor_len);
    }
    else if (descriptor_type == CS_INTERFACE)
    {
        /* Class-specific interface descriptors differ between the control
         * and streaming interfaces; dispatch on the interface subclass.
         */
        if (usb_conv_info && usb_conv_info->interfaceSubclass == SC_VIDEOCONTROL)
        {
            offset = dissect_usb_video_control_interface_descriptor(tree, desc_tvb,
                                                                    descriptor_len,
                                                                    pinfo, usb_conv_info);
        }
        else if (usb_conv_info && usb_conv_info->interfaceSubclass == SC_VIDEOSTREAMING)
        {
            offset = dissect_usb_video_streaming_interface_descriptor(tree, desc_tvb,
                                                                      descriptor_len);
        }
    }
    /* else not something we recognize, just return offset = 0 */

    return offset;
}
/*****************************************************************************/
/* CONTROL TRANSFERS */
/*****************************************************************************/
/**
* Dissect GET/SET transactions on the Video Probe and Commit controls.
*
* @param parent_tree protocol tree to which the probe/commit subtree should be added
* @param tvb the tv_buff with the (remaining) packet data
* @param offset where in tvb to begin dissection.
* On entry this refers to the probe/commit bmHint field.
*
* @return offset within tvb at which dissection should continue
*/
static int
dissect_usb_vid_probe(proto_tree *parent_tree, tvbuff_t *tvb, int offset)
{
    proto_tree *tree;

    /* Flag bits in bmHint */
    static const int *hint_bits[] = {
        &hf_usb_vid_probe_hint_D[0],
        &hf_usb_vid_probe_hint_D[1],
        &hf_usb_vid_probe_hint_D[2],
        &hf_usb_vid_probe_hint_D[3],
        &hf_usb_vid_probe_hint_D[4],
        NULL
    };

    /* Sanity check: bitmask table must cover every registered field plus NULL */
    DISSECTOR_ASSERT(array_length(hint_bits) == (1+array_length(hf_usb_vid_probe_hint_D)));

    tree = proto_tree_add_subtree(parent_tree, tvb, offset, -1, ett_video_probe, NULL, "Probe/Commit Info");

    proto_tree_add_bitmask(tree, tvb, offset, hf_usb_vid_probe_hint,
                           ett_probe_hint, hint_bits, ENC_LITTLE_ENDIAN);

    /* Fixed 26-byte portion of the probe/commit structure */
    proto_tree_add_item(tree, hf_usb_vid_format_index, tvb, offset+2, 1, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_frame_index, tvb, offset+3, 1, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_frame_interval, tvb, offset+4, 4, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_probe_key_frame_rate, tvb, offset+8, 2, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_probe_p_frame_rate, tvb, offset+10, 2, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_probe_comp_quality, tvb, offset+12, 2, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_probe_comp_window, tvb, offset+14, 2, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_probe_delay, tvb, offset+16, 2, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_probe_max_frame_sz, tvb, offset+18, 4, ENC_LITTLE_ENDIAN);
    proto_tree_add_item(tree, hf_usb_vid_probe_max_payload_sz, tvb, offset+22, 4, ENC_LITTLE_ENDIAN);
    offset += 26;

    /* UVC 1.1 fields (present only if the payload extends past the 1.0 part) */
    if (tvb_reported_length_remaining(tvb, offset) > 0)
    {
        /* Flag bits in bmFramingInfo */
        static const int *framing_bits[] = {
            &hf_usb_vid_probe_framing_D[0],
            &hf_usb_vid_probe_framing_D[1],
            NULL
        };
        DISSECTOR_ASSERT(array_length(framing_bits) == (1+array_length(hf_usb_vid_probe_framing_D)));

        proto_tree_add_item(tree, hf_usb_vid_probe_clock_freq, tvb, offset, 4, ENC_LITTLE_ENDIAN);
        offset += 4;

        proto_tree_add_bitmask(tree, tvb, offset, hf_usb_vid_probe_framing,
                               ett_probe_framing, framing_bits, ENC_NA);
        offset++;

        proto_tree_add_item(tree, hf_usb_vid_probe_preferred_ver, tvb, offset, 1, ENC_LITTLE_ENDIAN);
        proto_tree_add_item(tree, hf_usb_vid_probe_min_ver, tvb, offset+1, 1, ENC_LITTLE_ENDIAN);
        proto_tree_add_item(tree, hf_usb_vid_probe_max_ver, tvb, offset+2, 1, ENC_LITTLE_ENDIAN);
        offset += 3;
    }

    return offset;
}
/**
* Fetch the table that describes known control selectors for the specified unit/terminal.
*
* @param entity_id Unit or terminal of interest
* @param usb_conv_info Information about the interface the entity is part of
*
* @return Table describing control selectors for the specified entity (may be NULL)
*/
/**
 * Fetch the table that describes known control selectors for the specified
 * unit/terminal.
 *
 * @param entity_id     Unit or terminal of interest (0 = interface request)
 * @param usb_conv_info Information about the interface the entity is part of
 *
 * @return Table describing control selectors for the entity, or NULL if
 *         the entity (or its selector namespace) is unknown
 */
static value_string_ext*
get_control_selector_values(guint8 entity_id, usb_conv_info_t *usb_conv_info)
{
    video_conv_info_t *video_conv_info;
    video_entity_t *entity = NULL;

    if (usb_conv_info == NULL)
        return NULL;

    /* Look up the entity recorded while dissecting the control descriptors */
    video_conv_info = (video_conv_info_t *)usb_conv_info->class_data;
    if (video_conv_info)
        entity = (video_entity_t*) wmem_tree_lookup32(video_conv_info->entities, entity_id);

    if (entity_id == 0)
    {
        /* Interface request: the selector namespace depends on the subclass */
        if (usb_conv_info->interfaceSubclass == SC_VIDEOCONTROL)
            return &cs_control_interface_ext;
        if (usb_conv_info->interfaceSubclass == SC_VIDEOSTREAMING)
            return &cs_streaming_interface_ext;
        return NULL;
    }

    if (entity == NULL)
        return NULL;

    /* Unit/terminal request: the namespace depends on the entity's subtype */
    switch (entity->subtype)
    {
        case VC_INPUT_TERMINAL:
            return (entity->terminalType == ITT_CAMERA) ? &cs_camera_terminal_ext : NULL;

        case VC_PROCESSING_UNIT:
            return &cs_processing_unit_ext;

        case VC_SELECTOR_UNIT:
            return &cs_selector_unit_ext;

        default:
            return NULL;
    }
}
/**
* Fetch the name of an entity's control.
*
* @param entity_id Unit or terminal of interest
* @param control_sel Control of interest
* @param usb_conv_info Information about the interface the entity is part of
*
* @return Table describing control selectors for the specified entity (may be NULL)
*/
/**
 * Fetch the name of an entity's control.
 *
 * @param entity_id     Unit or terminal of interest
 * @param control_sel   Control of interest
 * @param usb_conv_info Information about the interface the entity is part of
 *
 * @return Name of the control, or NULL when the entity or selector is unknown
 */
static const gchar*
get_control_selector_name(guint8 entity_id, guint8 control_sel, usb_conv_info_t *usb_conv_info)
{
    value_string_ext *selector_table = get_control_selector_values(entity_id, usb_conv_info);

    return selector_table ? try_val_to_str_ext(control_sel, selector_table) : NULL;
}
/* Dissect the response to a GET INFO request.
 *
 * @param tree   protocol tree to which the capabilities bitmask is added
 * @param tvb    the tv_buff with the (remaining) packet data
 * @param offset position of the one-byte capabilities/state bitmask
 *
 * @return offset just past the dissected byte
 */
static int
dissect_usb_vid_control_info(proto_tree *tree, tvbuff_t *tvb, int offset)
{
    /* Capability/state flag bits in the GET INFO response byte */
    static const int *capability_bits[] = {
        &hf_usb_vid_control_info_D[0],
        &hf_usb_vid_control_info_D[1],
        &hf_usb_vid_control_info_D[2],
        &hf_usb_vid_control_info_D[3],
        &hf_usb_vid_control_info_D[4],
        &hf_usb_vid_control_info_D[5],
        &hf_usb_vid_control_info_D[6],
        NULL
    };

    /* Sanity check: bitmask table must cover every registered field plus NULL */
    DISSECTOR_ASSERT(array_length(capability_bits) == (1+array_length(hf_usb_vid_control_info_D)));

    proto_tree_add_bitmask(tree, tvb, offset, hf_usb_vid_control_info,
                           ett_control_capabilities, capability_bits, ENC_NA);

    return offset+1;
}
/* Dissect all remaining bytes in the tvb as a specified type of UVC value.
* These are displayed as an unsigned integer where possible, otherwise just as
* a text item.
*
* @param tree the protocol tree to which an item will be added
* @param tvb the tv_buff with the (remaining) packet data
* @param offset How far into tvb the value data begins
* @param request Identifies type of value - either bRequest from a CONTROL
* transfer (i.e., USB_SETUP_GET_MAX), or bValue from an
* INTERRUPT transfer (i.e., CONTROL_CHANGE_MAX).
*/
static void
dissect_usb_vid_control_value(proto_tree *tree, tvbuff_t *tvb, int offset, guint8 request)
{
    gint value_size;
    const char *fallback_name;  /* label used when the value can't be shown numerically */
    int hf;                     /* header field for this request type, or -1 if unknown */

    /* Pick the header field that matches the request/attribute type */
    switch (request)
    {
        case USB_SETUP_GET_DEF:
            hf = hf_usb_vid_control_default;
            fallback_name = "Default Value";
            break;

        case USB_SETUP_GET_MIN:
        case CONTROL_CHANGE_MIN:
            hf = hf_usb_vid_control_min;
            fallback_name = "Min Value";
            break;

        case USB_SETUP_GET_MAX:
        case CONTROL_CHANGE_MAX:
            hf = hf_usb_vid_control_max;
            fallback_name = "Max Value";
            break;

        case USB_SETUP_GET_RES:
            hf = hf_usb_vid_control_res;
            fallback_name = "Resolution";
            break;

        case USB_SETUP_GET_CUR:
        case USB_SETUP_SET_CUR:
        case CONTROL_CHANGE_VALUE:
            hf = hf_usb_vid_control_cur;
            fallback_name = "Current Value";
            break;

        /* @todo UVC 1.5 USB_SETUP_x_ALL?
         *       They are poorly specified.
         */

        default:
            hf = -1;
            fallback_name = "Value";
            break;
    }

    value_size = tvb_reported_length_remaining(tvb, offset);

    if (hf != -1)
    {
        /* The field must be an integer type for the add_item call below */
        header_field_info *hfinfo;
        hfinfo = proto_registrar_get_nth(hf);
        DISSECTOR_ASSERT(IS_FT_INT(hfinfo->type) || IS_FT_UINT(hfinfo->type));
    }

    /* Values wider than 4 bytes can't be shown as an integer; fall back to bytes */
    if ((hf != -1) && (value_size <= 4))
    {
        proto_tree_add_item(tree, hf, tvb, offset, value_size, ENC_LITTLE_ENDIAN);
    }
    else
    {
        /* @todo Display as FT_BYTES with a big-endian disclaimer?
         * See https://bugs.wireshark.org/bugzilla/show_bug.cgi?id=7933
         */
        proto_tree_add_bytes_format(tree, hf_usb_vid_control_value, tvb, offset, value_size, NULL, "%s", fallback_name);
    }
}
/**
* Dissect video class GET/SET transactions.
*
* @param pinfo Information associated with the packet being dissected
* @param tree protocol tree to which fields should be added
* @param tvb the tv_buff with the (remaining) packet data
* @param offset where in tvb to begin dissection.
* On entry this refers to the bRequest field of the SETUP
* transaction.
* @param is_request true if the packet is host-to-device,
* false if device-to-host
* @param usb_trans_info Information specific to this request/response pair
* @param usb_conv_info Information about the conversation with the host
*/
static int
dissect_usb_vid_get_set(packet_info *pinfo, proto_tree *tree, tvbuff_t *tvb,
                        int offset, gboolean is_request,
                        usb_trans_info_t *usb_trans_info,
                        usb_conv_info_t *usb_conv_info)
{
    const gchar *short_name = NULL;
    guint8 control_sel;
    guint8 entity_id;

    /* MSB of wIndex is the unit/terminal ID; MSB of wValue is the control selector */
    entity_id = usb_trans_info->setup.wIndex >> 8;
    control_sel = usb_trans_info->setup.wValue >> 8;

    /* Display something informative in the INFO column */
    col_append_str(pinfo->cinfo, COL_INFO, " [");
    short_name = get_control_selector_name(entity_id, control_sel, usb_conv_info);
    if (short_name)
        col_append_str(pinfo->cinfo, COL_INFO, short_name);
    else
    {
        short_name = "Unknown";

        if (entity_id == 0)
        {
            col_append_fstr(pinfo->cinfo, COL_INFO, "Interface %u control 0x%x",
                            usb_conv_info->interfaceNum, control_sel);
        }
        else
        {
            col_append_fstr(pinfo->cinfo, COL_INFO, "Unit %u control 0x%x",
                            entity_id, control_sel);
        }
    }

    col_append_str(pinfo->cinfo, COL_INFO, "]");
    col_set_fence(pinfo->cinfo, COL_INFO);

    /* Add information on request context,
     * as GENERATED fields if not directly available (for filtering)
     */
    if (is_request)
    {
        /* Move gaze to control selector (MSB of wValue) */
        offset++;
        proto_tree_add_uint_format_value(tree, hf_usb_vid_control_selector, tvb,
                                         offset, 1, control_sel, "%s (0x%02x)", short_name, control_sel);
        offset++;

        proto_tree_add_item(tree, hf_usb_vid_control_interface, tvb, offset, 1, ENC_LITTLE_ENDIAN);
        offset++;

        proto_tree_add_item(tree, hf_usb_vid_control_entity, tvb, offset, 1, ENC_LITTLE_ENDIAN);
        offset++;

        proto_tree_add_item(tree, hf_usb_vid_length, tvb, offset, 2, ENC_LITTLE_ENDIAN);
        offset += 2;
    }
    else
    {
        /* Response: the setup fields aren't in this tvb, so add them as
         * zero-length GENERATED items for filtering purposes.
         */
        proto_item *ti;

        ti = proto_tree_add_uint(tree, hf_usb_vid_control_interface, tvb, 0, 0,
                                 usb_trans_info->setup.wIndex & 0xFF);
        PROTO_ITEM_SET_GENERATED(ti);

        ti = proto_tree_add_uint(tree, hf_usb_vid_control_entity, tvb, 0, 0, entity_id);
        PROTO_ITEM_SET_GENERATED(ti);

        ti = proto_tree_add_uint_format_value(tree, hf_usb_vid_control_selector, tvb,
                                              0, 0, control_sel, "%s (0x%02x)", short_name, control_sel);
        PROTO_ITEM_SET_GENERATED(ti);
    }

    /* A value payload is present in all responses and in SET CUR requests */
    if (!is_request || (usb_trans_info->setup.request == USB_SETUP_SET_CUR))
    {
        gint value_size = tvb_reported_length_remaining(tvb, offset);

        if (value_size != 0)
        {
            if ((entity_id == 0) && (usb_conv_info->interfaceSubclass == SC_VIDEOSTREAMING))
            {
                /* Streaming-interface probe/commit controls carry a structured payload */
                if ((control_sel == VS_PROBE_CONTROL) || (control_sel == VS_COMMIT_CONTROL))
                {
                    int old_offset = offset;
                    offset = dissect_usb_vid_probe(tree, tvb, offset);
                    value_size -= (offset - old_offset);
                }
            }
            else
            {
                if (usb_trans_info->setup.request == USB_SETUP_GET_INFO)
                {
                    dissect_usb_vid_control_info(tree, tvb, offset);
                    offset++;
                    value_size--;
                }
                else if (usb_trans_info->setup.request == USB_SETUP_GET_LEN)
                {
                    proto_tree_add_item(tree, hf_usb_vid_control_length, tvb, offset, 2, ENC_LITTLE_ENDIAN);
                    offset += 2;
                    value_size -= 2;
                }
                else if (   (usb_trans_info->setup.request == USB_SETUP_GET_CUR)
                         && (entity_id == 0)
                         && (usb_conv_info->interfaceSubclass == SC_VIDEOCONTROL)
                         && (control_sel == VC_REQUEST_ERROR_CODE_CONTROL))
                {
                    proto_tree_add_item(tree, hf_usb_vid_request_error, tvb, offset, 1, ENC_LITTLE_ENDIAN);
                    offset++;
                    value_size--;
                }
                else
                {
                    /* Plain GET/SET value of a control */
                    dissect_usb_vid_control_value(tree, tvb, offset, usb_trans_info->setup.request);
                    offset += value_size;
                    value_size = 0;
                }
            }

            /* Any payload bytes not accounted for above */
            if (value_size > 0)
            {
                proto_tree_add_item(tree, hf_usb_vid_control_data, tvb, offset, -1, ENC_NA);
                offset += value_size;
            }
        }
    }

    return offset;
}
/* Table for dispatch of video class SETUP transactions based on bRequest.
 * At the moment this is overkill since the same function handles all defined
 * requests.
 */

/* Signature shared by all per-request dissector functions */
typedef int (*usb_setup_dissector)(packet_info *pinfo, proto_tree *tree,
                                   tvbuff_t *tvb, int offset,
                                   gboolean is_request,
                                   usb_trans_info_t *usb_trans_info,
                                   usb_conv_info_t *usb_conv_info);

/* Maps one bRequest value to the dissector for that request */
typedef struct _usb_setup_dissector_table_t
{
    guint8 request;
    usb_setup_dissector dissector;
} usb_setup_dissector_table_t;

/* All supported class-specific requests; terminated by a NULL dissector */
static const usb_setup_dissector_table_t setup_dissectors[] = {
    {USB_SETUP_SET_CUR, dissect_usb_vid_get_set},
    {USB_SETUP_SET_CUR_ALL, dissect_usb_vid_get_set},
    {USB_SETUP_GET_CUR, dissect_usb_vid_get_set},
    {USB_SETUP_GET_MIN, dissect_usb_vid_get_set},
    {USB_SETUP_GET_MAX, dissect_usb_vid_get_set},
    {USB_SETUP_GET_RES, dissect_usb_vid_get_set},
    {USB_SETUP_GET_LEN, dissect_usb_vid_get_set},
    {USB_SETUP_GET_INFO, dissect_usb_vid_get_set},
    {USB_SETUP_GET_DEF, dissect_usb_vid_get_set},
    {USB_SETUP_GET_CUR_ALL, dissect_usb_vid_get_set},
    {USB_SETUP_GET_MIN_ALL, dissect_usb_vid_get_set},
    {USB_SETUP_GET_MAX_ALL, dissect_usb_vid_get_set},
    {USB_SETUP_GET_RES_ALL, dissect_usb_vid_get_set},
    {0, NULL}
};

/* Human-readable names for bRequest values (INFO column / bRequest field) */
static const value_string setup_request_names_vals[] = {
    {USB_SETUP_SET_CUR, "SET CUR"},
    {USB_SETUP_SET_CUR_ALL, "SET CUR ALL"},
    {USB_SETUP_GET_CUR, "GET CUR"},
    {USB_SETUP_GET_MIN, "GET MIN"},
    {USB_SETUP_GET_MAX, "GET MAX"},
    {USB_SETUP_GET_RES, "GET RES"},
    {USB_SETUP_GET_LEN, "GET LEN"},
    {USB_SETUP_GET_INFO, "GET INFO"},
    {USB_SETUP_GET_DEF, "GET DEF"},
    {USB_SETUP_GET_CUR_ALL, "GET CUR ALL"},
    {USB_SETUP_GET_MIN_ALL, "GET MIN ALL"},
    {USB_SETUP_GET_MAX_ALL, "GET MAX ALL"},
    {USB_SETUP_GET_RES_ALL, "GET RES ALL"},
    {USB_SETUP_GET_DEF_ALL, "GET DEF ALL"},
    {0, NULL}
};
/* Registered dissector for video class-specific control requests.
* Dispatch to an appropriate dissector function.
*
* @param tvb the tv_buff with the (remaining) packet data.
* On entry, the gaze is set to SETUP bRequest field.
* @param pinfo the packet info of this packet (additional info)
* @param tree the protocol tree to be built or NULL
* @param data Not used
*
* @return 0 no class specific dissector was found
* @return <0 not enough data
* @return >0 amount of data in the descriptor
*/
static int
dissect_usb_vid_control(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data)
{
    /* In a request, the host is the source and srcport is NO_ENDPOINT */
    gboolean is_request = (pinfo->srcport == NO_ENDPOINT);
    usb_conv_info_t *usb_conv_info;
    usb_trans_info_t *usb_trans_info;
    int offset = 0;
    usb_setup_dissector dissector = NULL;
    const usb_setup_dissector_table_t *tmp;

    /* Reject the packet if data or usb_trans_info are NULL */
    if (data == NULL || ((usb_conv_info_t *)data)->usb_trans_info == NULL)
        return 0;
    usb_conv_info = (usb_conv_info_t *)data;
    usb_trans_info = usb_conv_info->usb_trans_info;

    /* See if we can find a class specific dissector for this request */
    for (tmp=setup_dissectors; tmp->dissector; tmp++)
    {
        if (tmp->request == usb_trans_info->setup.request)
        {
            dissector = tmp->dissector;
            break;
        }
    }
    /* We could not find any class specific dissector for this request;
     * return 0 and let USB try any of the standard requests.
     */
    if (!dissector)
        return 0;

    col_set_str(pinfo->cinfo, COL_PROTOCOL, "USBVIDEO");

    col_add_fstr(pinfo->cinfo, COL_INFO, "%s %s",
                 val_to_str(usb_trans_info->setup.request, setup_request_names_vals, "Unknown type %x"),
                 is_request?"Request ":"Response");

    if (is_request)
    {
        /* The first byte of a request payload is bRequest itself */
        proto_tree_add_item(tree, hf_usb_vid_request, tvb, offset, 1, ENC_LITTLE_ENDIAN);
        offset += 1;
    }

    offset = dissector(pinfo, tree, tvb, offset, is_request, usb_trans_info, usb_conv_info);
    return offset;
}
/* Registered dissector for video class-specific URB_INTERRUPT
*
* @param tvb the tv_buff with the (remaining) packet data
* @param pinfo the packet info of this packet (additional info)
* @param tree the protocol tree to be built or NULL
* @param data Unused API parameter
*
* @return 0 no class specific dissector was found
* @return <0 not enough data
* @return >0 amount of data in the descriptor
*/
static int
dissect_usb_vid_interrupt(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data)
{
    usb_conv_info_t *usb_conv_info;
    gint bytes_available;
    int offset = 0;

    usb_conv_info = (usb_conv_info_t *)data;
    bytes_available = tvb_reported_length_remaining(tvb, offset);

    col_set_str(pinfo->cinfo, COL_PROTOCOL, "USBVIDEO");

    if (bytes_available > 0)
    {
        guint8 originating_interface;
        guint8 originating_entity;

        /* Low bits of bStatusType identify the originating interface class */
        originating_interface = tvb_get_guint8(tvb, offset) & INT_ORIGINATOR_MASK;
        proto_tree_add_item(tree, hf_usb_vid_interrupt_bStatusType, tvb, offset, 1, ENC_LITTLE_ENDIAN);
        offset++;

        originating_entity = tvb_get_guint8(tvb, offset);
        proto_tree_add_item(tree, hf_usb_vid_interrupt_bOriginator, tvb, offset, 1, ENC_LITTLE_ENDIAN);
        offset++;

        if (originating_interface == INT_VIDEOCONTROL)
        {
            guint8 control_sel;
            guint8 attribute;
            const gchar *control_name;

            proto_tree_add_item(tree, hf_usb_vid_control_interrupt_bEvent, tvb, offset, 1, ENC_LITTLE_ENDIAN);
            offset++;

            /* Resolve the control name from the entity info gathered while
             * dissecting the control interface descriptors.
             */
            control_sel = tvb_get_guint8(tvb, offset);
            control_name = get_control_selector_name(originating_entity, control_sel, usb_conv_info);
            if (!control_name)
                control_name = "Unknown";

            proto_tree_add_uint_format_value(tree, hf_usb_vid_control_selector, tvb,
                                             offset, 1, control_sel, "%s (0x%02x)",
                                             control_name, control_sel);
            offset++;

            attribute = tvb_get_guint8(tvb, offset);
            proto_tree_add_item(tree, hf_usb_vid_interrupt_bAttribute, tvb, offset, 1, ENC_LITTLE_ENDIAN);
            offset++;

            /* The remaining payload format depends on which attribute changed */
            switch (attribute)
            {
                case CONTROL_CHANGE_FAILURE:
                    proto_tree_add_item(tree, hf_usb_vid_request_error, tvb, offset, 1, ENC_LITTLE_ENDIAN);
                    offset++;
                    break;

                case CONTROL_CHANGE_INFO:
                    offset = dissect_usb_vid_control_info(tree, tvb, offset);
                    break;

                case CONTROL_CHANGE_VALUE:
                case CONTROL_CHANGE_MIN:
                case CONTROL_CHANGE_MAX:
                    dissect_usb_vid_control_value(tree, tvb, offset, attribute);
                    offset += tvb_reported_length_remaining(tvb, offset);
                    break;

                default:
                    proto_tree_add_item(tree, hf_usb_vid_value_data, tvb, offset, -1, ENC_NA);
                    offset += tvb_reported_length_remaining(tvb, offset);
                    break;
            }
        }
        else if (originating_interface == INT_VIDEOSTREAMING)
        {
            /* @todo */
        }
    }
    else
        offset = -2;  /* not enough data (negative return per function contract) */

    return offset;
}
/* Register the USB Video protocol: header fields, subtrees, and expert-info
 * items. Called once by the Wireshark/epan registration machinery.
 *
 * Fixes relative to the previous revision:
 *  - wObjectiveFocalLengthMax blurb said "Minimum" (copy/paste error).
 *  - hf_usb_vid_probe_max_ver displayed "bPreferredVersion" instead of
 *    "bMaxVersion" (copy/paste error).
 *  - hf_usb_vid_frame_step_interval displayed "dwMinFrameInterval" instead
 *    of "dwFrameIntervalStep" (copy/paste error).
 *  - FT_BOOLEAN fields with a zero bitmask must use BASE_NONE as their
 *    "display" value (it is a parent-bitfield width, not a number base);
 *    two fields wrongly used BASE_DEC.
 */
void
proto_register_usb_vid(void)
{
    static hf_register_info hf[] = {
            /***** Setup *****/
        { &hf_usb_vid_request,
            { "bRequest", "usbvideo.setup.bRequest", FT_UINT8, BASE_HEX, VALS(setup_request_names_vals), 0x0,
              NULL, HFILL }
        },
        { &hf_usb_vid_length,
            { "wLength", "usbvideo.setup.wLength", FT_UINT16, BASE_DEC, NULL, 0x0,
              NULL, HFILL }
        },
            /***** Request Error Control *****/
        { &hf_usb_vid_request_error,
            { "bRequestErrorCode", "usbvideo.reqerror.code",
              FT_UINT8, BASE_DEC | BASE_EXT_STRING,
              &request_error_codes_ext, 0,
              "Request Error Code", HFILL }
        },
            /***** Unit/Terminal Controls *****/
        { &hf_usb_vid_control_selector,
            { "Control Selector", "usbvideo.control.selector", FT_UINT8, BASE_HEX, NULL, 0x0,
              "ID of the control within its entity", HFILL }
        },
        { &hf_usb_vid_control_entity,
            { "Entity", "usbvideo.control.entity", FT_UINT8, BASE_HEX, NULL, 0x0,
              "Unit or terminal to which the control belongs", HFILL }
        },
        { &hf_usb_vid_control_interface,
            { "Interface", "usbvideo.control.interface", FT_UINT8, BASE_HEX, NULL, 0x0,
              "Interface to which the control belongs", HFILL }
        },
        { &hf_usb_vid_control_info,
            { "Info (Capabilities/State)", "usbvideo.control.info",
              FT_UINT8, BASE_HEX, NULL, 0,
              "Control capabilities and current state", HFILL }
        },
        /* bmCapabilities bits of the GET_INFO response (one flag per bit) */
        { &hf_usb_vid_control_info_D[0],
            { "Supports GET", "usbvideo.control.info.D0",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<0),
              NULL, HFILL }
        },
        { &hf_usb_vid_control_info_D[1],
            { "Supports SET", "usbvideo.control.info.D1",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<1),
              NULL, HFILL }
        },
        { &hf_usb_vid_control_info_D[2],
            { "Disabled due to automatic mode", "usbvideo.control.info.D2",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<2),
              NULL, HFILL }
        },
        { &hf_usb_vid_control_info_D[3],
            { "Autoupdate", "usbvideo.control.info.D3",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<3),
              NULL, HFILL }
        },
        { &hf_usb_vid_control_info_D[4],
            { "Asynchronous", "usbvideo.control.info.D4",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<4),
              NULL, HFILL }
        },
        { &hf_usb_vid_control_info_D[5],
            { "Disabled due to incompatibility with Commit state", "usbvideo.control.info.D5",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<5),
              NULL, HFILL }
        },
        /* Top two bits are reserved; decoded as a plain integer sub-field */
        { &hf_usb_vid_control_info_D[6],
            { "Reserved", "usbvideo.control.info.D6",
              FT_UINT8, BASE_HEX, NULL, (3<<6),
              NULL, HFILL }
        },
        { &hf_usb_vid_control_length,
            { "Control Length", "usbvideo.control.len",
              FT_UINT16, BASE_DEC, NULL, 0,
              "Control size in bytes", HFILL }
        },
        { &hf_usb_vid_control_default,
            { "Default value", "usbvideo.control.value.default",
              FT_UINT32, BASE_DEC_HEX, NULL, 0,
              NULL, HFILL }
        },
        { &hf_usb_vid_control_min,
            { "Minimum value", "usbvideo.control.value.min",
              FT_UINT32, BASE_DEC_HEX, NULL, 0,
              NULL, HFILL }
        },
        { &hf_usb_vid_control_max,
            { "Maximum value", "usbvideo.control.value.max",
              FT_UINT32, BASE_DEC_HEX, NULL, 0,
              NULL, HFILL }
        },
        { &hf_usb_vid_control_res,
            { "Resolution", "usbvideo.control.value.res",
              FT_UINT32, BASE_DEC_HEX, NULL, 0,
              NULL, HFILL }
        },
        { &hf_usb_vid_control_cur,
            { "Current value", "usbvideo.control.value.cur",
              FT_UINT32, BASE_DEC_HEX, NULL, 0,
              NULL, HFILL }
        },
            /***** Terminal Descriptors *****/
        /* @todo Decide whether to unify .name fields */
        { &hf_usb_vid_control_ifdesc_iTerminal,
            { "iTerminal", "usbvideo.terminal.name", FT_UINT8, BASE_DEC, NULL, 0x0,
              "String Descriptor describing this terminal", HFILL }
        },
        /* @todo Decide whether to unify .terminal.id and .unit.id under .entityID */
        { &hf_usb_vid_control_ifdesc_terminal_id,
            { "bTerminalID", "usbvideo.terminal.id", FT_UINT8, BASE_DEC, NULL, 0x0,
              NULL, HFILL }
        },
        { &hf_usb_vid_control_ifdesc_terminal_type,
            { "wTerminalType", "usbvideo.terminal.type",
              FT_UINT16, BASE_HEX | BASE_EXT_STRING, &vc_terminal_types_ext, 0,
              NULL, HFILL }
        },
        { &hf_usb_vid_control_ifdesc_assoc_terminal,
            { "bAssocTerminal", "usbvideo.terminal.assocTerminal", FT_UINT8, BASE_DEC, NULL, 0x0,
              "Associated Terminal", HFILL }
        },
            /***** Camera Terminal Descriptor *****/
        { &hf_usb_vid_cam_objective_focal_len_min,
            { "wObjectiveFocalLengthMin", "usbvideo.camera.objectiveFocalLengthMin",
              FT_UINT16, BASE_DEC, NULL, 0,
              "Minimum Focal Length for Optical Zoom", HFILL }
        },
        { &hf_usb_vid_cam_objective_focal_len_max,
            { "wObjectiveFocalLengthMax", "usbvideo.camera.objectiveFocalLengthMax",
              FT_UINT16, BASE_DEC, NULL, 0,
              /* Fixed: blurb previously said "Minimum" (copy/paste error) */
              "Maximum Focal Length for Optical Zoom", HFILL }
        },
        { &hf_usb_vid_cam_ocular_focal_len,
            { "wOcularFocalLength", "usbvideo.camera.ocularFocalLength",
              FT_UINT16, BASE_DEC, NULL, 0,
              "Ocular Focal Length for Optical Zoom", HFILL }
        },
        /* bmControls bitmap of the Camera Terminal descriptor.
         * NOTE(review): the FT_BOOLEAN display value is the number of array
         * entries, used here as the parent-bitfield width -- confirm this
         * matches the on-wire bmControls size for UVC 1.5 devices. */
        { &hf_usb_vid_cam_control_D[0],
            { "Scanning Mode", "usbvideo.camera.control.D0",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<0),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[1],
            { "Auto Exposure Mode", "usbvideo.camera.control.D1",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<1),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[2],
            { "Auto Exposure Priority", "usbvideo.camera.control.D2",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<2),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[3],
            { "Exposure Time (Absolute)", "usbvideo.camera.control.D3",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<3),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[4],
            { "Exposure Time (Relative)", "usbvideo.camera.control.D4",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<4),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[5],
            { "Focus (Absolute)", "usbvideo.camera.control.D5",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<5),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[6],
            { "Focus (Relative)", "usbvideo.camera.control.D6",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<6),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[7],
            { "Iris (Absolute)", "usbvideo.camera.control.D7",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<7),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[8],
            { "Iris (Relative)", "usbvideo.camera.control.D8",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<8),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[9],
            { "Zoom (Absolute)", "usbvideo.camera.control.D9",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<9),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[10],
            { "Zoom (Relative)", "usbvideo.camera.control.D10",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<10),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[11],
            { "PanTilt (Absolute)", "usbvideo.camera.control.D11",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<11),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[12],
            { "PanTilt (Relative)", "usbvideo.camera.control.D12",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<12),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[13],
            { "Roll (Absolute)", "usbvideo.camera.control.D13",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<13),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[14],
            { "Roll (Relative)", "usbvideo.camera.control.D14",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<14),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[15],
            { "D15", "usbvideo.camera.control.D15",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<15),
              "Reserved", HFILL }
        },
        { &hf_usb_vid_cam_control_D[16],
            { "D16", "usbvideo.camera.control.D16",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<16),
              "Reserved", HFILL }
        },
        { &hf_usb_vid_cam_control_D[17],
            { "Auto Focus", "usbvideo.camera.control.D17",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<17),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[18],
            { "Privacy", "usbvideo.camera.control.D18",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<18),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[19],
            { "Focus (Simple)", "usbvideo.camera.control.D19",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<19),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[20],
            { "Window", "usbvideo.camera.control.D20",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<20),
              NULL, HFILL }
        },
        { &hf_usb_vid_cam_control_D[21],
            { "Region of Interest", "usbvideo.camera.control.D21",
              FT_BOOLEAN,
              array_length(hf_usb_vid_cam_control_D),
              TFS(&tfs_yes_no), (1<<21),
              NULL, HFILL }
        },
            /***** Unit Descriptors *****/
        { &hf_usb_vid_control_ifdesc_unit_id,
            { "bUnitID", "usbvideo.unit.id", FT_UINT8, BASE_DEC, NULL, 0x0,
              NULL, HFILL }
        },
        { &hf_usb_vid_num_inputs,
            { "bNrInPins", "usbvideo.unit.numInputs",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Number of input pins", HFILL }
        },
        { &hf_usb_vid_sources,
            { "baSourceID", "usbvideo.unit.sources",
              FT_BYTES, BASE_NONE, NULL, 0,
              "Input entity IDs", HFILL }
        },
            /***** Processing Unit Descriptor *****/
        { &hf_usb_vid_iProcessing,
            { "iProcessing", "usbvideo.processor.name", FT_UINT8, BASE_DEC, NULL, 0x0,
              "String Descriptor describing this terminal", HFILL }
        },
        /* 24-bit bmControls bitmap of the Processing Unit descriptor */
        { &hf_usb_vid_proc_control_D[0],
            { "Brightness", "usbvideo.processor.control.D0",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<0),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[1],
            { "Contrast", "usbvideo.processor.control.D1",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<1),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[2],
            { "Hue", "usbvideo.processor.control.D2",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<2),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[3],
            { "Saturation", "usbvideo.processor.control.D3",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<3),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[4],
            { "Sharpness", "usbvideo.processor.control.D4",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<4),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[5],
            { "Gamma", "usbvideo.processor.control.D5",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<5),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[6],
            { "White Balance Temperature", "usbvideo.processor.control.D6",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<6),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[7],
            { "White Balance Component", "usbvideo.processor.control.D7",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<7),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[8],
            { "Backlight Compensation", "usbvideo.processor.control.D8",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<8),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[9],
            { "Gain", "usbvideo.processor.control.D9",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<9),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[10],
            { "Power Line Frequency", "usbvideo.processor.control.D10",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<10),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[11],
            { "Hue, Auto", "usbvideo.processor.control.D11",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<11),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[12],
            { "White Balance Temperature, Auto", "usbvideo.processor.control.D12",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<12),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[13],
            { "White Balance Component, Auto", "usbvideo.processor.control.D13",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<13),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[14],
            { "Digital Multiplier", "usbvideo.processor.control.D14",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<14),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[15],
            { "Digital Multiplier Limit", "usbvideo.processor.control.D15",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<15),
              "Reserved", HFILL }
        },
        { &hf_usb_vid_proc_control_D[16],
            { "Analog Video Standard", "usbvideo.processor.control.D16",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<16),
              "Reserved", HFILL }
        },
        { &hf_usb_vid_proc_control_D[17],
            { "Analog Video Lock Status", "usbvideo.processor.control.D17",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<17),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_control_D[18],
            { "Contrast, Auto", "usbvideo.processor.control.D18",
              FT_BOOLEAN, 24, TFS(&tfs_yes_no), (1<<18),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_standards,
            { "bmVideoStandards", "usbvideo.processor.standards",
              FT_UINT8, BASE_HEX, NULL, 0,
              "Supported analog video standards", HFILL }
        },
        { &hf_usb_vid_proc_standards_D[0],
            { "None", "usbvideo.processor.standards.D0",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<0),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_standards_D[1],
            { "NTSC - 525/60", "usbvideo.processor.standards.D1",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<1),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_standards_D[2],
            { "PAL - 625/50", "usbvideo.processor.standards.D2",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<2),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_standards_D[3],
            { "SECAM - 625/50", "usbvideo.processor.standards.D3",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<3),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_standards_D[4],
            { "NTSC - 625/50", "usbvideo.processor.standards.D4",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<4),
              NULL, HFILL }
        },
        { &hf_usb_vid_proc_standards_D[5],
            { "PAL - 525/60", "usbvideo.processor.standards.D5",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<5),
              NULL, HFILL }
        },
        { &hf_usb_vid_max_multiplier,
            { "wMaxMultiplier", "usbvideo.processor.maxMultiplier",
              FT_UINT16, BASE_DEC, NULL, 0,
              "100 x max digital multiplication", HFILL }
        },
            /***** Selector Unit Descriptor *****/
        { &hf_usb_vid_iSelector,
            { "iSelector", "usbvideo.selector.name", FT_UINT8, BASE_DEC, NULL, 0x0,
              "String Descriptor describing this terminal", HFILL }
        },
            /***** Extension Unit Descriptor *****/
        { &hf_usb_vid_iExtension,
            { "iExtension", "usbvideo.extension.name", FT_UINT8, BASE_DEC, NULL, 0x0,
              "String Descriptor describing this terminal", HFILL }
        },
        { &hf_usb_vid_exten_guid,
            { "guid", "usbvideo.extension.guid",
              FT_GUID, BASE_NONE, NULL, 0,
              "Identifier", HFILL }
        },
        { &hf_usb_vid_exten_num_controls,
            { "bNumControls", "usbvideo.extension.numControls",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Number of controls", HFILL }
        },
            /***** Probe/Commit *****/
        { &hf_usb_vid_probe_hint,
            { "bmHint", "usbvideo.probe.hint",
              FT_UINT16, BASE_HEX, NULL, 0,
              "Fields to hold constant during negotiation", HFILL }
        },
        { &hf_usb_vid_probe_hint_D[0],
            { "dwFrameInterval", "usbvideo.probe.hint.D0",
              FT_BOOLEAN, 5, TFS(&probe_hint_meaning), (1<<0),
              "Frame Rate", HFILL }
        },
        { &hf_usb_vid_probe_hint_D[1],
            { "wKeyFrameRate", "usbvideo.probe.hint.D1",
              FT_BOOLEAN, 5, TFS(&probe_hint_meaning), (1<<1),
              "Key Frame Rate", HFILL }
        },
        { &hf_usb_vid_probe_hint_D[2],
            { "wPFrameRate", "usbvideo.probe.hint.D2",
              FT_BOOLEAN, 5, TFS(&probe_hint_meaning), (1<<2),
              "P-Frame Rate", HFILL }
        },
        { &hf_usb_vid_probe_hint_D[3],
            { "wCompQuality", "usbvideo.probe.hint.D3",
              FT_BOOLEAN, 5, TFS(&probe_hint_meaning), (1<<3),
              "Compression Quality", HFILL }
        },
        { &hf_usb_vid_probe_hint_D[4],
            { "wCompWindowSize", "usbvideo.probe.hint.D4",
              FT_BOOLEAN, 5, TFS(&probe_hint_meaning), (1<<4),
              "Compression Window Size", HFILL }
        },
        { &hf_usb_vid_probe_key_frame_rate,
            { "wKeyFrameRate", "usbvideo.probe.keyFrameRate",
              FT_UINT16, BASE_DEC, NULL, 0,
              "Key frame rate", HFILL }
        },
        { &hf_usb_vid_probe_p_frame_rate,
            { "wPFrameRate", "usbvideo.probe.pFrameRate",
              FT_UINT16, BASE_DEC, NULL, 0,
              "P frame rate", HFILL }
        },
        { &hf_usb_vid_probe_comp_quality,
            { "wCompQuality", "usbvideo.probe.compQuality",
              FT_UINT16, BASE_DEC, NULL, 0,
              "Compression quality [0-10000]", HFILL }
        },
        { &hf_usb_vid_probe_comp_window,
            { "wCompWindow", "usbvideo.probe.compWindow",
              FT_UINT16, BASE_DEC, NULL, 0,
              "Window size for average bit rate control", HFILL }
        },
        { &hf_usb_vid_probe_delay,
            { "wDelay", "usbvideo.probe.delay",
              FT_UINT16, BASE_DEC, NULL, 0,
              "Latency in ms from capture to USB", HFILL }
        },
        { &hf_usb_vid_probe_max_frame_sz,
            { "dwMaxVideoFrameSize", "usbvideo.probe.maxVideoFrameSize",
              FT_UINT32, BASE_DEC, NULL, 0,
              NULL, HFILL }
        },
        { &hf_usb_vid_probe_max_payload_sz,
            { "dwMaxPayloadTransferSize", "usbvideo.probe.maxPayloadTransferSize",
              FT_UINT32, BASE_DEC, NULL, 0,
              NULL, HFILL }
        },
        { &hf_usb_vid_probe_clock_freq,
            { "dwClockFrequency", "usbvideo.probe.clockFrequency",
              FT_UINT32, BASE_DEC, NULL, 0,
              "Device clock frequency in Hz", HFILL }
        },
        { &hf_usb_vid_probe_framing,
            { "bmFramingInfo", "usbvideo.probe.framing",
              FT_UINT16, BASE_HEX, NULL, 0,
              NULL, HFILL }
        },
        { &hf_usb_vid_probe_framing_D[0],
            { "Frame ID required", "usbvideo.probe.framing.D0",
              FT_BOOLEAN, 2, TFS(&tfs_yes_no), (1<<0),
              NULL, HFILL }
        },
        { &hf_usb_vid_probe_framing_D[1],
            { "EOF utilized", "usbvideo.probe.framing.D1",
              FT_BOOLEAN, 2, TFS(&tfs_yes_no), (1<<1),
              NULL, HFILL }
        },
        { &hf_usb_vid_probe_preferred_ver,
            { "bPreferredVersion", "usbvideo.probe.preferredVersion",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Preferred payload format version", HFILL }
        },
        { &hf_usb_vid_probe_min_ver,
            { "bMinVersion", "usbvideo.probe.minVersion",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Min supported payload format version", HFILL }
        },
        { &hf_usb_vid_probe_max_ver,
            /* Fixed: field name previously said "bPreferredVersion" (copy/paste error) */
            { "bMaxVersion", "usbvideo.probe.maxVer",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Max supported payload format version", HFILL }
        },
        { &hf_usb_vid_control_ifdesc_dwClockFrequency,
            { "dwClockFrequency", "usbvideo.probe.clockFrequency",
              FT_UINT32, BASE_DEC, NULL, 0,
              "Device clock frequency (Hz) for selected format", HFILL }
        },
            /***** Format Descriptors *****/
        { &hf_usb_vid_format_index,
            { "bFormatIndex", "usbvideo.format.index",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Index of this format descriptor", HFILL }
        },
        { &hf_usb_vid_format_num_frame_descriptors,
            { "bNumFrameDescriptors", "usbvideo.format.numFrameDescriptors",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Number of frame descriptors for this format", HFILL }
        },
        { &hf_usb_vid_format_guid,
            { "guidFormat", "usbvideo.format.guid",
              FT_GUID, BASE_NONE, NULL, 0,
              "Stream encoding format", HFILL }
        },
        { &hf_usb_vid_format_bits_per_pixel,
            { "bBitsPerPixel", "usbvideo.format.bitsPerPixel",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Bits per pixel", HFILL }
        },
        { &hf_usb_vid_default_frame_index,
            { "bDefaultFrameIndex", "usbvideo.format.defaultFrameIndex",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Optimum frame index for this stream", HFILL }
        },
        { &hf_usb_vid_aspect_ratio_x,
            { "bAspectRatioX", "usbvideo.format.aspectRatioX",
              FT_UINT8, BASE_DEC, NULL, 0,
              "X dimension of picture aspect ratio", HFILL }
        },
        { &hf_usb_vid_aspect_ratio_y,
            { "bAspectRatioY", "usbvideo.format.aspectRatioY",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Y dimension of picture aspect ratio", HFILL }
        },
        { &hf_usb_vid_interlace_flags,
            { "bmInterlaceFlags", "usbvideo.format.interlace",
              FT_UINT8, BASE_HEX, NULL, 0x0,
              NULL, HFILL }
        },
        { &hf_usb_vid_is_interlaced,
            { "Interlaced stream", "usbvideo.format.interlace.D0",
              FT_BOOLEAN, 8, TFS(&is_interlaced_meaning), (1<<0),
              NULL, HFILL }
        },
        { &hf_usb_vid_interlaced_fields,
            { "Fields per frame", "usbvideo.format.interlace.D1",
              FT_BOOLEAN, 8, TFS(&interlaced_fields_meaning), (1<<1),
              NULL, HFILL }
        },
        { &hf_usb_vid_field_1_first,
            { "Field 1 first", "usbvideo.format.interlace.D2",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<2),
              NULL, HFILL }
        },
        { &hf_usb_vid_field_pattern,
            { "Field pattern", "usbvideo.format.interlace.pattern",
              FT_UINT8, BASE_DEC | BASE_EXT_STRING,
              &field_pattern_meaning_ext, (3<<4),
              NULL, HFILL }
        },
        { &hf_usb_vid_copy_protect,
            { "bCopyProtect", "usbvideo.format.copyProtect",
              FT_UINT8, BASE_DEC, VALS(copy_protect_meaning), 0,
              NULL, HFILL }
        },
        { &hf_usb_vid_variable_size,
            { "Variable size", "usbvideo.format.variableSize",
              /* Fixed: FT_BOOLEAN with a zero bitmask takes BASE_NONE,
               * not a number base such as BASE_DEC */
              FT_BOOLEAN, BASE_NONE, NULL, 0,
              NULL, HFILL }
        },
            /***** MJPEG Format Descriptor *****/
        { &hf_usb_vid_mjpeg_flags,
            { "bmFlags", "usbvideo.mjpeg.flags",
              FT_UINT8, BASE_HEX, NULL, 0,
              "Characteristics", HFILL }
        },
        { &hf_usb_vid_mjpeg_fixed_samples,
            { "Fixed size samples", "usbvideo.mjpeg.fixed_size",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<0),
              NULL, HFILL }
        },
            /***** Frame Descriptors *****/
        { &hf_usb_vid_frame_index,
            { "bFrameIndex", "usbvideo.frame.index",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Index of this frame descriptor", HFILL }
        },
        { &hf_usb_vid_frame_capabilities,
            { "bmCapabilities", "usbvideo.frame.capabilities",
              FT_UINT8, BASE_HEX, NULL, 0,
              "Capabilities", HFILL }
        },
        { &hf_usb_vid_frame_stills_supported,
            { "Still image", "usbvideo.frame.stills",
              FT_BOOLEAN, 8, TFS(&tfs_supported_not_supported), (1<<0),
              NULL, HFILL }
        },
        { &hf_usb_vid_frame_interval,
            { "dwFrameInterval", "usbvideo.frame.interval",
              FT_UINT32, BASE_DEC, NULL, 0,
              "Frame interval multiple of 100 ns", HFILL }
        },
        { &hf_usb_vid_frame_fixed_frame_rate,
            { "Fixed frame rate", "usbvideo.frame.fixedRate",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<1),
              NULL, HFILL }
        },
        { &hf_usb_vid_frame_width,
            { "wWidth", "usbvideo.frame.width",
              FT_UINT16, BASE_DEC, NULL, 0,
              "Width of frame in pixels", HFILL }
        },
        { &hf_usb_vid_frame_height,
            { "wHeight", "usbvideo.frame.height",
              FT_UINT16, BASE_DEC, NULL, 0,
              "Height of frame in pixels", HFILL }
        },
        { &hf_usb_vid_frame_min_bit_rate,
            { "dwMinBitRate", "usbvideo.frame.minBitRate",
              FT_UINT32, BASE_DEC, NULL, 0,
              "Minimum bit rate in bps", HFILL }
        },
        { &hf_usb_vid_frame_max_bit_rate,
            { "dwMaxBitRate", "usbvideo.frame.maxBitRate",
              FT_UINT32, BASE_DEC, NULL, 0,
              "Maximum bit rate in bps", HFILL }
        },
        { &hf_usb_vid_frame_max_frame_sz,
            { "dwMaxVideoFrameBufferSize", "usbvideo.frame.maxBuffer",
              FT_UINT32, BASE_DEC, NULL, 0,
              "Maximum bytes per frame", HFILL }
        },
        { &hf_usb_vid_frame_default_interval,
            { "dwDefaultFrameInterval", "usbvideo.frame.interval.default",
              FT_UINT32, BASE_DEC, NULL, 0,
              "Suggested default", HFILL }
        },
        { &hf_usb_vid_frame_interval_type,
            { "bFrameIntervalType", "usbvideo.frame.interval.type",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Frame rate control (continuous/discrete)", HFILL }
        },
        { &hf_usb_vid_frame_min_interval,
            { "dwMinFrameInterval", "usbvideo.frame.interval.min",
              FT_UINT32, BASE_DEC, NULL, 0,
              "Shortest frame interval (* 100 ns)", HFILL }
        },
        { &hf_usb_vid_frame_max_interval,
            { "dwMaxFrameInterval", "usbvideo.frame.interval.max",
              FT_UINT32, BASE_DEC, NULL, 0,
              "Longest frame interval (* 100 ns)", HFILL }
        },
        { &hf_usb_vid_frame_step_interval,
            /* Fixed: field name previously said "dwMinFrameInterval" (copy/paste error) */
            { "dwFrameIntervalStep", "usbvideo.frame.interval.step",
              FT_UINT32, BASE_DEC, NULL, 0,
              "Granularity of frame interval (* 100 ns)", HFILL }
        },
        { &hf_usb_vid_frame_bytes_per_line,
            { "dwBytesPerLine", "usbvideo.frame.bytesPerLine",
              FT_UINT32, BASE_DEC, NULL, 0,
              "Fixed number of bytes per video line", HFILL }
        },
            /***** Colorformat Descriptor *****/
        { &hf_usb_vid_color_primaries,
            { "bColorPrimaries", "usbvideo.color.primaries",
              FT_UINT8, BASE_DEC | BASE_EXT_STRING,
              &color_primaries_meaning_ext, 0,
              NULL, HFILL }
        },
        { &hf_usb_vid_transfer_characteristics,
            { "bTransferCharacteristics", "usbvideo.color.transferCharacteristics",
              FT_UINT8, BASE_DEC | BASE_EXT_STRING,
              &color_transfer_characteristics_ext, 0,
              NULL, HFILL }
        },
        { &hf_usb_vid_matrix_coefficients,
            { "bMatrixCoefficients", "usbvideo.color.matrixCoefficients",
              FT_UINT8, BASE_DEC | BASE_EXT_STRING,
              &matrix_coefficients_meaning_ext, 0,
              NULL, HFILL }
        },
            /***** Video Control Header Descriptor *****/
        { &hf_usb_vid_control_ifdesc_bcdUVC,
            { "bcdUVC", "usbvideo.bcdUVC",
              FT_UINT16, BASE_HEX, NULL, 0,
              "Video Device Class Specification release number", HFILL }
        },
        { &hf_usb_vid_control_ifdesc_bInCollection,
            { "bInCollection", "usbvideo.numStreamingInterfaces",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Number of VideoStreaming interfaces", HFILL }
        },
        { &hf_usb_vid_control_ifdesc_baInterfaceNr,
            { "baInterfaceNr", "usbvideo.streamingInterfaceNumbers",
              FT_BYTES, BASE_NONE, NULL, 0,
              "Interface numbers of VideoStreaming interfaces", HFILL }
        },
            /***** Video Streaming Input Header Descriptor *****/
        { &hf_usb_vid_streaming_ifdesc_bNumFormats,
            { "bNumFormats", "usbvideo.streaming.numFormats",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Number of video payload format descriptors", HFILL }
        },
        { &hf_usb_vid_streaming_bmInfo,
            { "bmInfo", "usbvideo.streaming.info",
              FT_UINT8, BASE_HEX, NULL, 0,
              "Capabilities", HFILL }
        },
        { &hf_usb_vid_streaming_info_D[0],
            { "Dynamic Format Change", "usbvideo.streaming.info.D0",
              FT_BOOLEAN, 8, TFS(&tfs_yes_no), (1<<0),
              "Dynamic Format Change", HFILL }
        },
        { &hf_usb_vid_streaming_control_D[0],
            { "wKeyFrameRate", "usbvideo.streaming.control.D0",
              FT_BOOLEAN, 6, TFS(&tfs_yes_no), (1<<0),
              "Probe and Commit support", HFILL }
        },
        { &hf_usb_vid_streaming_control_D[1],
            { "wPFrameRate", "usbvideo.streaming.control.D1",
              FT_BOOLEAN, 6, TFS(&tfs_yes_no), (1<<1),
              "Probe and Commit support", HFILL }
        },
        { &hf_usb_vid_streaming_control_D[2],
            { "wCompQuality", "usbvideo.streaming.control.D2",
              FT_BOOLEAN, 6, TFS(&tfs_yes_no), (1<<2),
              "Probe and Commit support", HFILL }
        },
        { &hf_usb_vid_streaming_control_D[3],
            { "wCompWindowSize", "usbvideo.streaming.control.D3",
              FT_BOOLEAN, 6, TFS(&tfs_yes_no), (1<<3),
              "Probe and Commit support", HFILL }
        },
        { &hf_usb_vid_streaming_control_D[4],
            { "Generate Key Frame", "usbvideo.streaming.control.D4",
              FT_BOOLEAN, 6, TFS(&tfs_yes_no), (1<<4),
              "Probe and Commit support", HFILL }
        },
        { &hf_usb_vid_streaming_control_D[5],
            { "Update Frame Segment", "usbvideo.streaming.control.D5",
              FT_BOOLEAN, 6, TFS(&tfs_yes_no), (1<<5),
              "Probe and Commit support", HFILL }
        },
        { &hf_usb_vid_streaming_terminal_link,
            { "bTerminalLink", "usbvideo.streaming.terminalLink", FT_UINT8, BASE_DEC, NULL, 0x0,
              "Output terminal ID", HFILL }
        },
        { &hf_usb_vid_streaming_still_capture_method,
            { "bStillCaptureMethod", "usbvideo.streaming.stillCaptureMethod",
              FT_UINT8, BASE_DEC | BASE_EXT_STRING,
              &vs_still_capture_methods_ext, 0,
              "Method of Still Image Capture", HFILL }
        },
        { &hf_usb_vid_streaming_trigger_support,
            { "HW Triggering", "usbvideo.streaming.triggerSupport",
              /* Fixed: FT_BOOLEAN with a zero bitmask takes BASE_NONE,
               * not a number base such as BASE_DEC */
              FT_BOOLEAN, BASE_NONE, TFS(&tfs_supported_not_supported), 0,
              "Is HW triggering supported", HFILL }
        },
        { &hf_usb_vid_streaming_trigger_usage,
            { "bTriggerUsage", "usbvideo.streaming.triggerUsage",
              FT_UINT8, BASE_DEC, VALS(vs_trigger_usage), 0,
              "How host SW should respond to trigger", HFILL }
        },
            /***** Interrupt URB *****/
        { &hf_usb_vid_interrupt_bStatusType,
            { "Status Type", "usbvideo.interrupt.statusType",
              FT_UINT8, BASE_HEX, VALS(interrupt_status_types), 0xF,
              NULL, HFILL }
        },
        { &hf_usb_vid_interrupt_bAttribute,
            { "Change Type", "usbvideo.interrupt.attribute",
              FT_UINT8, BASE_HEX | BASE_EXT_STRING,
              &control_change_types_ext, 0,
              "Type of control change", HFILL }
        },
        { &hf_usb_vid_interrupt_bOriginator,
            { "Originator", "usbvideo.interrupt.originator",
              FT_UINT8, BASE_DEC, NULL, 0,
              "ID of the entity that reports this interrupt", HFILL }
        },
        { &hf_usb_vid_control_interrupt_bEvent,
            { "Event", "usbvideo.interrupt.controlEvent",
              FT_UINT8, BASE_HEX, VALS(control_interrupt_events), 0,
              "Type of event", HFILL }
        },
            /***** Video Control Endpoint Descriptor *****/
        { &hf_usb_vid_epdesc_subtype,
            { "Subtype", "usbvideo.ep.descriptorSubType",
              FT_UINT8, BASE_DEC, VALS(vc_ep_descriptor_subtypes), 0,
              "Descriptor Subtype", HFILL }
        },
        { &hf_usb_vid_epdesc_max_transfer_sz,
            { "wMaxTransferSize", "usbvideo.ep.maxInterruptSize", FT_UINT16,
              BASE_DEC, NULL, 0x0, "Max interrupt structure size", HFILL }
        },
            /***** Fields used in multiple contexts *****/
        { &hf_usb_vid_ifdesc_wTotalLength,
            { "wTotalLength", "usbvideo.totalLength",
              FT_UINT16, BASE_DEC, NULL, 0,
              "Video interface descriptor size", HFILL }
        },
        { &hf_usb_vid_bControlSize,
            { "bControlSize", "usbvideo.bmcontrolSize",
              FT_UINT8, BASE_DEC, NULL, 0,
              "Size of bmControls field", HFILL }
        },
        { &hf_usb_vid_bmControl,
            { "bmControl", "usbvideo.availableControls",
              FT_UINT32, BASE_HEX, NULL, 0,
              "Available controls", HFILL }
        },
        { &hf_usb_vid_bmControl_bytes,
            { "bmControl", "usbvideo.availableControls.bytes",
              FT_BYTES, BASE_NONE, NULL, 0,
              "Available controls", HFILL }
        },
        { &hf_usb_vid_control_ifdesc_src_id,
            { "bSourceID", "usbvideo.sourceID", FT_UINT8, BASE_DEC, NULL, 0x0,
              "Entity to which this terminal/unit is connected", HFILL }
        },
            /**********/
        { &hf_usb_vid_control_ifdesc_subtype,
            { "Subtype", "usbvideo.control.descriptorSubType",
              FT_UINT8, BASE_DEC | BASE_EXT_STRING,
              &vc_if_descriptor_subtypes_ext, 0,
              "Descriptor Subtype", HFILL }
        },
        { &hf_usb_vid_streaming_ifdesc_subtype,
            { "Subtype", "usbvideo.streaming.descriptorSubType",
              FT_UINT8, BASE_DEC | BASE_EXT_STRING,
              &vs_if_descriptor_subtypes_ext, 0,
              "Descriptor Subtype", HFILL }
        },
        { &hf_usb_vid_descriptor_data,
            { "Descriptor data", "usbvideo.descriptor_data", FT_BYTES, BASE_NONE, NULL, 0x0,
              NULL, HFILL }
        },
        { &hf_usb_vid_control_data,
            { "Control data", "usbvideo.control_data", FT_BYTES, BASE_NONE, NULL, 0x0,
              NULL, HFILL }
        },
        { &hf_usb_vid_control_value,
            { "Control value", "usbvideo.control_value", FT_BYTES, BASE_NONE, NULL, 0x0,
              NULL, HFILL }
        },
        { &hf_usb_vid_value_data,
            { "Value data", "usbvideo.value_data", FT_BYTES, BASE_NONE, NULL, 0x0,
              NULL, HFILL }
        },
    };

    /* Protocol subtree (expansion) handles */
    static gint *usb_vid_subtrees[] = {
        &ett_usb_vid,
        &ett_descriptor_video_endpoint,
        &ett_descriptor_video_control,
        &ett_descriptor_video_streaming,
        &ett_camera_controls,
        &ett_processing_controls,
        &ett_streaming_controls,
        &ett_streaming_info,
        &ett_interlace_flags,
        &ett_frame_capability_flags,
        &ett_mjpeg_flags,
        &ett_video_probe,
        &ett_probe_hint,
        &ett_probe_framing,
        &ett_video_standards,
        &ett_control_capabilities
    };

    /* Expert-info items flagged during dissection */
    static ei_register_info ei[] = {
        { &ei_usb_vid_subtype_unknown, { "usbvideo.subtype.unknown", PI_UNDECODED, PI_WARN, "Unknown VC subtype", EXPFILL }},
        { &ei_usb_vid_bitmask_len, { "usbvideo.bitmask_len_error", PI_UNDECODED, PI_WARN, "Only least-significant bytes decoded", EXPFILL }},
    };

    expert_module_t* expert_usb_vid;

    proto_usb_vid = proto_register_protocol("USB Video", "USBVIDEO", "usbvideo");
    proto_register_field_array(proto_usb_vid, hf, array_length(hf));
    proto_register_subtree_array(usb_vid_subtrees, array_length(usb_vid_subtrees));
    expert_usb_vid = expert_register_protocol(proto_usb_vid);
    expert_register_field_array(expert_usb_vid, ei, array_length(ei));
}
/* Hook the USB Video dissectors into the USB class dissector tables so
 * that control transfers, descriptors, and interrupt transfers for
 * IF_CLASS_VIDEO interfaces are routed to this dissector. */
void
proto_reg_handoff_usb_vid(void)
{
    dissector_handle_t control_handle    = create_dissector_handle(dissect_usb_vid_control, proto_usb_vid);
    dissector_handle_t descriptor_handle = create_dissector_handle(dissect_usb_vid_descriptor, proto_usb_vid);
    dissector_handle_t interrupt_handle  = create_dissector_handle(dissect_usb_vid_interrupt, proto_usb_vid);

    dissector_add_uint("usb.control", IF_CLASS_VIDEO, control_handle);
    dissector_add_uint("usb.descriptor", IF_CLASS_VIDEO, descriptor_handle);
    dissector_add_uint("usb.interrupt", IF_CLASS_VIDEO, interrupt_handle);
}
/*
 * Editor modelines - https://www.wireshark.org/tools/modelines.html
 *
 * Local variables:
 * c-basic-offset: 4
 * tab-width: 8
 * indent-tabs-mode: nil
 * End:
 *
 * vi: set shiftwidth=4 tabstop=8 expandtab:
 * :indentSize=4:tabSize=8:noTabs=true:
 */
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_5108_3 |
crossvul-cpp_data_good_2744_0 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* ROUTE - implementation of the IP router.
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Linus Torvalds, <Linus.Torvalds@helsinki.fi>
* Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* Fixes:
* Alan Cox : Verify area fixes.
* Alan Cox : cli() protects routing changes
* Rui Oliveira : ICMP routing table updates
* (rco@di.uminho.pt) Routing table insertion and update
* Linus Torvalds : Rewrote bits to be sensible
* Alan Cox : Added BSD route gw semantics
* Alan Cox : Super /proc >4K
* Alan Cox : MTU in route table
* Alan Cox : MSS actually. Also added the window
* clamper.
* Sam Lantinga : Fixed route matching in rt_del()
* Alan Cox : Routing cache support.
* Alan Cox : Removed compatibility cruft.
* Alan Cox : RTF_REJECT support.
* Alan Cox : TCP irtt support.
* Jonathan Naylor : Added Metric support.
* Miquel van Smoorenburg : BSD API fixes.
* Miquel van Smoorenburg : Metrics.
* Alan Cox : Use __u32 properly
* Alan Cox : Aligned routing errors more closely with BSD
* our system is still very different.
* Alan Cox : Faster /proc handling
* Alexey Kuznetsov : Massive rework to support tree based routing,
* routing caches and better behaviour.
*
* Olaf Erb : irtt wasn't being copied right.
* Bjorn Ekwall : Kerneld route support.
* Alan Cox : Multicast fixed (I hope)
* Pavel Krauz : Limited broadcast fixed
* Mike McLagan : Routing by source
* Alexey Kuznetsov : End of old history. Split to fib.c and
* route.c and rewritten from scratch.
* Andi Kleen : Load-limit warning messages.
* Vitaly E. Lavrov : Transparent proxy revived after year coma.
* Vitaly E. Lavrov : Race condition in ip_route_input_slow.
* Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
* Vladimir V. Ivanov : IP rule info (flowid) is really useful.
* Marc Boucher : routing by fwmark
* Robert Olsson : Added rt_cache statistics
* Arnaldo C. Melo : Convert proc stuff to seq_file
* Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
* Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
* Ilia Sotnikov : Removed TOS from hash calculations
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) "IPv4: " fmt
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include "fib_lookup.h"
/* Mask a flowi4 TOS down to the bits that matter for route lookup. */
#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
#define RT_GC_TIMEOUT (300*HZ)
/* Route/ICMP tunables (wired to sysctl elsewhere in this file). */
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;	/* redirects sent before giving up on a peer */
static int ip_rt_redirect_load __read_mostly	= HZ / 50;	/* base of the exponential redirect backoff */
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));	/* quiet period that resets the backoff */
static int ip_rt_error_cost __read_mostly	= HZ;	/* token cost of one ICMP error */
static int ip_rt_error_burst __read_mostly	= 5 * HZ;	/* ICMP error token-bucket capacity */
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;	/* lifetime of a learned PMTU */
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;	/* floor for learned PMTU (payload + IP + TCP) */
static int ip_rt_min_advmss __read_mostly	= 256;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
/*
 * Interface to generic destination cache.
 */
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
			   struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);
/* IPv4 dsts share their metrics with the FIB entry (see rt_set_nexthop)
 * and never copy-on-write them, so this callback must be unreachable.
 */
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
/* dst_ops vtable that plugs IPv4 routes into the generic dst layer. */
static struct dst_ops ipv4_dst_ops = {
	.family = AF_INET,
	.check = ipv4_dst_check,
	.default_advmss = ipv4_default_advmss,
	.mtu = ipv4_mtu,
	.cow_metrics = ipv4_cow_metrics,
	.destroy = ipv4_dst_destroy,
	.negative_advice = ipv4_negative_advice,
	.link_failure = ipv4_link_failure,
	.update_pmtu = ip_rt_update_pmtu,
	.redirect = ip_do_redirect,
	.local_out = __ip_local_out,
	.neigh_lookup = ipv4_neigh_lookup,
	.confirm_neigh = ipv4_confirm_neigh,
};
/* Odd table slots correspond to TOS values with the low (ECN/"cost") bit
 * set; they map to the same traffic-control priority as the even slot.
 */
#define ECN_OR_COST(class) TC_PRIO_##class
/* Map the 4-bit TOS field to a TC priority band; indexed by tos >> 1. */
const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
/* Per-CPU counters shown in /proc/net/stat/rt_cache. */
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
#ifdef CONFIG_PROC_FS
/* /proc/net/rt_cache: the routing cache itself is gone, so this seq_file
 * only emits the legacy header line to keep the file format intact for
 * old userspace.
 */
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}
static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;	/* nothing follows the header */
}
static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	/* Nothing to release. */
}
static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}
static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};
static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}
static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
/* /proc/net/stat/rt_cache iterator: position 0 is the header token, and
 * position N+1 corresponds to CPU N (holes in the possible-CPU map are
 * skipped).
 */
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;
	if (*pos == 0)
		return SEQ_START_TOKEN;
	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}
static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;
	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}
static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
	/* Nothing to release. */
}
/* Emit one line of per-CPU stats. Fields the removed route cache used to
 * maintain are printed as literal zeros to preserve the column layout.
 */
static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;
	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}
	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,
		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,
		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}
static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};
static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}
static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
/* /proc/net/rt_acct: sum the 256 per-class ip_rt_acct counters across all
 * possible CPUs and emit the aggregate table as raw binary.
 */
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;
	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;
	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}
	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rt_acct_proc_show, NULL);
}
static const struct file_operations rt_acct_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= rt_acct_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif
/* Register the per-netns proc entries; unwinds already-created entries on
 * failure via the classic goto-cleanup ladder.
 */
static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;
	pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;
	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;
#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
	if (!pde)
		goto err3;
#endif
	return 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}
/* Mirror of ip_rt_do_proc_init(): tear down everything it created. */
static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}
static struct pernet_operations ip_rt_proc_ops __net_initdata =	{
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};
static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}
#else
/* !CONFIG_PROC_FS: nothing to register. */
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
/* A cached route is stale once its generation id no longer matches the
 * netns-wide IPv4 route generation counter.
 */
static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}
/* Invalidate every cached route in @net by bumping the generation id;
 * stale entries are then rejected lazily by rt_is_expired().
 */
void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}
/* Resolve the ARP neighbour for a route: prefer the route's gateway,
 * fall back to the packet's destination, and finally to @daddr.
 * Creates the neighbour entry if the hash lookup misses.
 */
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;
	const struct rtable *rt;
	struct neighbour *n;
	rt = (const struct rtable *) dst;
	if (rt->rt_gateway)
		pkey = (const __be32 *) &rt->rt_gateway;
	else if (skb)
		pkey = &ip_hdr(skb)->daddr;
	n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
	if (n)
		return n;
	return neigh_create(&arp_tbl, pkey, dev);
}
/* Mark the neighbour for this route's nexthop as confirmed (reachability
 * hint). Skipped for multicast/broadcast/local routes where per-neighbour
 * confirmation makes no sense.
 */
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;
	const struct rtable *rt;
	rt = (const struct rtable *)dst;
	if (rt->rt_gateway)
		pkey = (const __be32 *)&rt->rt_gateway;
	else if (!daddr ||
		 (rt->rt_flags &
		  (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL)))
		return;
	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}
/* IP-ID generator: a hashed array of counters plus per-bucket timestamps. */
#define IP_IDENTS_SZ 2048u
static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;
/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = ACCESS_ONCE(*p_tstamp);
	u32 now = (u32)jiffies;
	u32 new, delta = 0;
	/* Bucket idle since last use: add a random jump proportional to the
	 * idle time so packet counts cannot be inferred from ID deltas.
	 * cmpxchg ensures only one CPU applies the perturbation.
	 */
	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = prandom_u32_max(now - old);
	/* Do not use atomic_add_return() as it makes UBSAN unhappy */
	do {
		old = (u32)atomic_read(p_id);
		new = old + delta + segs;
	} while (atomic_cmpxchg(p_id, old, new) != old);
	/* Return the first ID of the reserved run of @segs ids. */
	return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
/* Pick an IP ID for @iph, hashing (daddr, saddr, protocol) per netns so
 * unrelated flows use independent ID sequences.
 */
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	static u32 ip_idents_hashrnd __read_mostly;
	u32 hash, id;
	net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
	hash = jhash_3words((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol ^ net_hash_mix(net),
			    ip_idents_hashrnd);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
/* Fill @fl4 for an output-route lookup based on an IP header. When a
 * socket is supplied, its bound device, mark, TOS and protocol override
 * the caller-provided values.
 */
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);
		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}
/* Build a flow key from a received skb (plus optional socket overrides). */
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;
	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}
/* Build a flow key purely from a connected socket's state; honours a
 * source-route (SRR) option's first-hop address if one is set.
 */
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}
/* Dispatch: use the skb when available, otherwise the socket alone. */
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}
/* Serializes all writers of the per-nexthop exception (fnhe) tables. */
static DEFINE_SPINLOCK(fnhe_lock);
/* Detach and release the input/output routes cached on an exception.
 * Caller holds fnhe_lock; readers are protected by RCU.
 */
static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;
	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}
/* Pick the least-recently-stamped exception in a bucket for reuse.
 * Only called when the chain is over FNHE_RECLAIM_DEPTH, so the chain
 * is known to be non-empty.
 */
static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;
	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	fnhe_flush_routes(oldest);
	return oldest;
}
/* Hash a destination into the per-nexthop exception table; keyed with a
 * boot-time random value to resist hash-flooding.
 */
static inline u32 fnhe_hashfun(__be32 daddr)
{
	static u32 fnhe_hashrnd __read_mostly;
	u32 hval;
	net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
	hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
	return hash_32(hval, FNHE_HASH_SHIFT);
}
/* Copy an exception's learned PMTU/expiry/gateway into a cached route. */
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->dst.expires = fnhe->fnhe_expires;
	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_gateway = fnhe->fnhe_gw;
		rt->rt_uses_gateway = 1;
	}
}
/* Record a learned per-destination exception (redirect gateway and/or
 * PMTU) on nexthop @nh. Creates the hash table and entry on demand;
 * recycles the oldest entry once a chain exceeds FNHE_RECLAIM_DEPTH.
 * All mutation happens under fnhe_lock.
 */
static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
				  u32 pmtu, unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	unsigned int i;
	int depth;
	u32 hval = fnhe_hashfun(daddr);
	spin_lock_bh(&fnhe_lock);
	hash = rcu_dereference(nh->nh_exceptions);
	if (!hash) {
		hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nh->nh_exceptions, hash);
	}
	hash += hval;
	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}
	if (fnhe) {
		/* Existing exception: merge in the new data. */
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			/* expires == 0 would mean "never"; force non-zero. */
			fnhe->fnhe_expires = max(1UL, expires);
		}
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;
			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_expires = expires;
		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nh->nh_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;
		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;
			prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}
	fnhe->fnhe_stamp = jiffies;
out_unlock:
	spin_unlock_bh(&fnhe_lock);
}
/* Process an ICMP redirect for @rt. Validates the advertised gateway
 * (must come from the current gateway, be a sane unicast/on-link address,
 * and pass shared-media/secure-redirect policy), then records it as a
 * nexthop exception and optionally kills the cached route.
 */
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;
	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;
	default:
		return;
	}
	/* Only the gateway we are currently using may redirect us. */
	if (rt->rt_gateway != old_gw)
		return;
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;
	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;
	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		/* Non-shared media: new gateway must be on the same link. */
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}
	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(n->nud_state & NUD_VALID)) {
			/* Kick off resolution; redirect is dropped for now. */
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh *nh = &FIB_RES_NH(res);
				update_or_create_fnhe(nh, fl4->daddr, new_gw,
						0, jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;
reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;
		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}
/* dst_ops->redirect: rebuild the flow key from the embedded IP header
 * (skb->data points at the offending packet's header here) and apply the
 * redirect, killing the stale cached route.
 */
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;
	rt = (struct rtable *) dst;
	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}
/* dst_ops->negative_advice: called when a socket suspects its cached
 * route is bad. Drop the route (returning NULL so the caller re-looks it
 * up) when it is obsolete, was installed by an ICMP redirect, or carries
 * an expiring learned PMTU; otherwise keep it.
 */
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;

	if (!rt)
		return dst;

	if (dst->obsolete > 0 ||
	    (rt->rt_flags & RTCF_REDIRECTED) || rt->dst.expires) {
		ip_rt_put(rt);
		return NULL;
	}
	return dst;
}
/*
 * Algorithm:
 * 1. The first ip_rt_redirect_number redirects are sent
 *    with exponential backoff, then we stop sending them at all,
 *    assuming that the host ignores our redirects.
 * 2. If we did not see packets requiring redirects
 *    during ip_rt_redirect_silence, we assume that the host
 *    forgot redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;
	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();
	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		/* No peer state available: send unthrottled. */
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}
	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
		peer->rate_tokens = 0;
	/* Too many ignored redirects; do not send anything
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->rate_tokens >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}
	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 * NOTE(review): rate_tokens is doing double duty here as both the
	 * redirect count and the exponential-backoff exponent; it is also
	 * shared with ip_error()'s token bucket via the same inet_peer.
	 * Later upstream kernels split a separate n_redirects counter out
	 * of this field - confirm whether that fix is wanted here.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}
/* dst->input handler for error routes: account the failure, rate-limit
 * per source peer with a jiffies-based token bucket (capacity
 * ip_rt_error_burst, cost ip_rt_error_cost), and send the matching ICMP
 * destination-unreachable. Always consumes @skb.
 */
static int ip_error(struct sk_buff *skb)
{
	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
	struct rtable *rt = skb_rtable(skb);
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;
	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;
	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		/* Not forwarding: just count, never answer with ICMP. */
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;
		case ENETUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}
	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);
	send = true;
	if (peer) {
		now = jiffies;
		/* Tokens refill at one per jiffy, capped at the burst size. */
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
out:	kfree_skb(skb);
	return 0;
}
/* Record a learned path MTU for @rt's destination as a nexthop exception.
 * Ignores locked-MTU routes, MTUs no smaller than the current one, and
 * clamps to ip_rt_min_pmtu. Skips the FIB lookup if the same MTU is
 * already recorded and not close to expiry.
 */
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	struct fib_result res;
	if (dst_metric_locked(dst, RTAX_MTU))
		return;
	if (ipv4_mtu(dst) < mtu)
		return;
	if (mtu < ip_rt_min_pmtu)
		mtu = ip_rt_min_pmtu;
	if (rt->rt_pmtu == mtu &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;
	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
		struct fib_nh *nh = &FIB_RES_NH(res);
		update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}
/* dst_ops->update_pmtu: derive the flow key from skb/socket and record. */
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;
	ip_rt_build_flow_key(&fl4, sk, skb);
	__ip_rt_update_pmtu(rt, &fl4, mtu);
}
/* Socket-less PMTU update (e.g. from ICMP handlers): route the embedded
 * header and record the MTU against that route.
 */
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u32 mark, u8 protocol, int flow_flags)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	if (!mark)
		mark = IP4_REPLY_MARK(net, skb->mark);
	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, flow_flags);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
/* PMTU update keyed by a socket (slow path used when the socket has no
 * usable cached dst, see ipv4_sk_update_pmtu()).
 */
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
/* Apply a PMTU update to @sk's cached route. Works on the socket's dst
 * when possible (re-routing and re-caching if the dst is obsolete), and
 * falls back to the lookup-only path when the socket is owned by user
 * context or has no cached dst.
 */
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);
	bh_lock_sock(sk);
	if (!ip_sk_accept_pmtu(sk))
		goto out;
	odst = sk_dst_get(sk);
	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}
	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = (struct rtable *)odst;
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		/* Cached dst invalid: look the route up afresh. */
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;
		new = true;
	}
	/* Apply the update to the innermost (non-xfrm) route. */
	__ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
	if (!dst_check(&rt->dst, 0)) {
		/* Update killed the route; replace it before caching. */
		if (new)
			dst_release(&rt->dst);
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;
		new = true;
	}
	if (new)
		sk_dst_set(sk, &rt->dst);
out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
/* Apply an ICMP redirect without a socket: route the embedded header and
 * record the new gateway (without killing the looked-up route).
 */
void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u32 mark, u8 protocol, int flow_flags)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, flow_flags);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);
/* As ipv4_redirect() but taking the flow parameters from @sk. */
void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);
	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;
	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD by dst_free().
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}
/* dst_ops->link_failure: tell the sender the host is unreachable and
 * expire the route so it is not reused.
 * NOTE(review): icmp_send() is called before any route/option rework and
 * this path later gained RCU/option-recompile fixes upstream - confirm
 * against the current stable tree if backporting.
 */
static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}
/* Catch-all output handler for routes that should never transmit;
 * reaching it indicates a routing bug, so log and warn loudly.
 */
static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}
/*
   We do not cache source address of outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so that it out of fast path.

   BTW remember: "addr" is allowed to be not aligned
   in IP options!
 */
/* Write the 4-byte preferred source address for @rt into @addr (byte
 * copy because IP-option pointers may be unaligned). Output routes use
 * the packet's own source; input routes consult the FIB, falling back to
 * an address scoped to the nexthop.
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;
	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct flowi4 fl4;
		struct iphdr *iph;
		iph = ip_hdr(skb);
		memset(&fl4, 0, sizeof(fl4));
		fl4.daddr = iph->daddr;
		fl4.saddr = iph->saddr;
		fl4.flowi4_tos = RT_TOS(iph->tos);
		fl4.flowi4_oif = rt->dst.dev->ifindex;
		fl4.flowi4_iif = skb->dev->ifindex;
		fl4.flowi4_mark = skb->mark;
		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
/* Merge a routing classid tag into the dst: each 16-bit half of
 * rt->dst.tclassid is taken from @tag only if it is still zero, so an
 * already-assigned half is never overwritten.
 */
static void set_class_tag(struct rtable *rt, u32 tag)
{
	u32 tclassid = rt->dst.tclassid;

	if (!(tclassid & 0xFFFF))
		tclassid |= tag & 0xFFFF;
	if (!(tclassid & 0xFFFF0000))
		tclassid |= tag & 0xFFFF0000;
	rt->dst.tclassid = tclassid;
}
#endif
/* Default advertised MSS: device MTU minus IP+TCP headers, floored at
 * ip_rt_min_advmss and capped so MSS + headers fits IPV4_MAX_PMTU.
 */
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, dst->dev->mtu - header_size,
				    ip_rt_min_advmss);
	return min(advmss, IPV4_MAX_PMTU - header_size);
}
/* Effective MTU for a route: a live learned PMTU wins, then the RTAX_MTU
 * metric, then the device MTU (clamped to 576 for locked gatewayed
 * routes). Lightweight-tunnel header space is subtracted last.
 */
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;
	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		return mtu;
	mtu = READ_ONCE(dst->dev->mtu);
	if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}
	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
/* Look up the nexthop exception recorded for @daddr on @nh, if any.
 * Caller must be in an RCU read-side section.
 */
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;
	if (!hash)
		return NULL;
	hval = fnhe_hashfun(daddr);
	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			return fnhe;
	}
	return NULL;
}
/* Attach @rt to nexthop exception @fnhe: copy the learned data into the
 * route and, when @do_cache, store @rt as the exception's cached
 * input/output route (releasing any previous one). A stale-generation
 * exception is reset first. Returns true iff @rt was cached.
 */
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;
	spin_lock_bh(&fnhe_lock);
	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));
		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);
		if (fnhe->fnhe_genid != genid) {
			/* Exception predates the last flush: wipe it. */
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gateway)
			rt->rt_gateway = daddr;
		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}
		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);
	return ret;
}
/* Install @rt as the nexthop's cached route (shared input slot, or this
 * CPU's output slot) with a lock-free cmpxchg. On success the previous
 * occupant is released; on a racing store, @rt keeps its original
 * refcount and false is returned.
 */
static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;
	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nh->nh_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
	}
	orig = *p;
	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			dst_dev_put(&orig->dst);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}
	return ret;
}
/* Per-CPU list of routes that are not cached on a nexthop, so that
 * rt_flush_dev() can still find and re-home them on device removal.
 */
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
/* Track an uncached route on this CPU's list; the route remembers which
 * list it is on so ipv4_dst_destroy() can unlink it from any CPU.
 */
static void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
	rt->rt_uncached_list = ul;
	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}
/* dst_ops->destroy: drop the refcounted metrics block (shared with the
 * FIB entry) and unlink the route from its uncached list, if any.
 */
static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
	struct rtable *rt = (struct rtable *) dst;
	if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
		kfree(p);
	if (!list_empty(&rt->rt_uncached)) {
		struct uncached_list *ul = rt->rt_uncached_list;
		spin_lock_bh(&ul->lock);
		list_del(&rt->rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}
/* Device is going away: retarget every uncached route that still points
 * at @dev to the netns loopback device, adjusting device refcounts.
 */
void rt_flush_dev(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int cpu;
	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = net->loopback_dev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&ul->lock);
	}
}
/* A cached route is usable if it was never force-killed and its
 * generation id is current.
 */
static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}
/* Finish initializing @rt from the FIB lookup result: gateway, shared
 * metrics, classid, lwtunnel state, and caching (on the exception if one
 * matched, else on the nexthop). Routes that could not be cached go on
 * the uncached list instead so device teardown can find them.
 */
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;
	if (fi) {
		struct fib_nh *nh = &FIB_RES_NH(*res);
		if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
			rt->rt_gateway = nh->nh_gw;
			rt->rt_uses_gateway = 1;
		}
		/* Metrics are shared (read-only) with the fib_info. */
		dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
		if (fi->fib_metrics != &dst_default_metrics) {
			rt->dst._metrics |= DST_METRICS_REFCOUNTED;
			atomic_inc(&fi->fib_metrics->refcnt);
		}
#ifdef CONFIG_IP_ROUTE_CLASSID
		rt->dst.tclassid = nh->nh_tclassid;
#endif
		rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nh, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gateway)
				rt->rt_gateway = daddr;
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);
#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}
/* Allocate and zero-initialize an IPv4 rtable bound to @dev.
 * @will_cache clears DST_HOST so the dst may be stored in the nexthop
 * cache; @nopolicy/@noxfrm set the corresponding dst flags.  Returns
 * NULL on allocation failure.
 */
struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool nopolicy, bool noxfrm, bool will_cache)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
		       (will_cache ? 0 : DST_HOST) |
		       (nopolicy ? DST_NOPOLICY : 0) |
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_gateway = 0;
		rt->rt_uses_gateway = 0;
		rt->rt_table_id = 0;
		INIT_LIST_HEAD(&rt->rt_uncached);

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);
/* called in rcu_read_lock() section */
/* Build an input route for a multicast packet: validate the source,
 * allocate a local-delivery rtable (loopback-bound) and attach it to
 * @skb.  @our is nonzero when this host is a group member (adds
 * RTCF_LOCAL).  Returns 0 or a negative errno.
 */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct rtable *rth;
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	u32 itag = 0;
	int err;

	/* Primary sanity checks. */
	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		/* 0.0.0.0 sources are acceptable only for link-local groups */
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto e_err;
	}
	if (our)
		flags |= RTCF_LOCAL;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		goto e_nobufs;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	/* multicast input routes must never be used for output */
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input= 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;

e_nobufs:
	return -ENOBUFS;
e_inval:
	return -EINVAL;
e_err:
	return err;
}
/* Account and (optionally, rate-limited) log a packet whose source
 * address failed validation.  Per RFC1812 the MAC header is the only
 * useful hint, so it is hex-dumped when available.
 */
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, true);
		}
	}
#endif
}
/* Remove the nexthop exception for @daddr (if any) from @nh's hash,
 * flushing its cached routes.  Modifications are serialized by
 * fnhe_lock; freeing is deferred via kfree_rcu for concurrent readers.
 */
static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nh->nh_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			/* unlink, then drop the cached dsts and free */
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}
static void set_lwt_redirect(struct rtable *rth)
{
if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
rth->dst.lwtstate->orig_output = rth->dst.output;
rth->dst.output = lwtunnel_output;
}
if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
rth->dst.lwtstate->orig_input = rth->dst.input;
rth->dst.input = lwtunnel_input;
}
}
/* called in rcu_read_lock() section */
/* Create (or reuse a cached) forwarding route for an input packet whose
 * FIB lookup resolved to a unicast nexthop.  Tries the nexthop-exception
 * and per-nexthop caches first; on miss allocates a new rtable and lets
 * rt_set_nexthop() attempt to cache it.  Returns 0 or negative errno.
 */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	/* only itag-free results are cacheable */
	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP) &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		IPCB(skb)->flags |= IPSKB_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * Proxy arp feature have been extended to allow, ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(&FIB_RES_NH(*res), daddr);
	if (do_cache) {
		if (fnhe) {
			rth = rcu_dereference(fnhe->fnhe_rth_input);
			/* expired exception: drop it and fall back to
			 * the regular nexthop cache below
			 */
			if (rth && rth->dst.expires &&
			    time_after(jiffies, rth->dst.expires)) {
				ip_del_fnhe(&FIB_RES_NH(*res), daddr);
				fnhe = NULL;
			} else {
				goto rt_cache;
			}
		}

		rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);

rt_cache:
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	if (res->table)
		rth->rt_table_id = res->table->tb_id;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	set_lwt_redirect(rth);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
 cleanup:
	return err;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
/* Fill @hash_keys with the outer saddr/daddr; for relevant ICMP error
 * messages (unreach/redirect/time-exceeded/paramprob, non-fragment)
 * replace them with the embedded inner header's addresses so the error
 * hashes onto the same path as the original flow.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	hash_keys->addrs.v4addrs.src = outer_iph->saddr;
	hash_keys->addrs.v4addrs.dst = outer_iph->daddr;
	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		return;

	/* non-first fragments do not carry the ICMP header */
	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		return;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		return;

	if (icmph->type != ICMP_DEST_UNREACH &&
	    icmph->type != ICMP_REDIRECT &&
	    icmph->type != ICMP_TIME_EXCEEDED &&
	    icmph->type != ICMP_PARAMETERPROB)
		return;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		return;
	hash_keys->addrs.v4addrs.src = inner_iph->saddr;
	hash_keys->addrs.v4addrs.dst = inner_iph->daddr;
}
/* if skb is set it will be used and fl4 can be NULL */
/* Compute the multipath selection hash per the namespace policy:
 * policy 0 hashes L3 addresses only (with ICMP inner-header handling),
 * policy 1 hashes the L4 five-tuple.  Result is a 31-bit value (top bit
 * shifted out) fed to fib_select_multipath().
 */
int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
		       const struct sk_buff *skb)
{
	struct net *net = fi->fib_net;
	struct flow_keys hash_keys;
	u32 mhash;

	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;
			memset(&hash_keys, 0, sizeof(hash_keys));
			skb_flow_dissect_flow_keys(skb, &keys, flag);
			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
			hash_keys.ports.src = keys.ports.src;
			hash_keys.ports.dst = keys.ports.dst;
			hash_keys.basic.ip_proto = keys.basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	return mhash >> 1;
}
EXPORT_SYMBOL_GPL(fib_multipath_hash);
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
/* Select a nexthop for multipath routes (hash-based), then build the
 * input route via __mkroute_input().  Returns 0 or negative errno.
 */
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && res->fi->fib_nhs > 1) {
		int h = fib_multipath_hash(res->fi, NULL, skb);

		fib_select_multipath(res, h);
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/*
 *	NOTE. We drop all the packets that has local source
 *	addresses, because every properly looped back packet
 *	must have correct destination already attached by output routine.
 *
 *	Such approach solves two big problems:
 *	1. Not simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with 100% of guarantee.
 *	called with rcu_read_lock()
 */
/* Slow-path input route resolution: sanity-check source/destination,
 * perform the FIB lookup and dispatch to forwarding (ip_mkroute_input),
 * local delivery, broadcast handling, or martian/no-route error paths.
 * On success the resulting dst is attached to @skb.
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev,
			       struct fib_result *res)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct ip_tunnel_info *tun_info;
	struct flowi4	fl4;
	unsigned int	flags = 0;
	u32		itag = 0;
	struct rtable	*rth;
	int		err = -EINVAL;
	struct net    *net = dev_net(dev);
	bool do_cache;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	   by fib_lookup.
	 */

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
	else
		fl4.flowi4_tun_key.tun_id = 0;
	skb_dst_drop(skb);

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res->fi = NULL;
	res->table = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I even do not know to fix it or not. Waiting for complains :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * and call it once if daddr or/and saddr are loopback addresses
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 *	Now we are ready to route packet.
	 */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_flags = 0;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_uid = sock_net_uid(net, NULL);
	err = fib_lookup(net, &fl4, res, 0);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res->type == RTN_BROADCAST)
		goto brd_input;

	if (res->type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  0, dev, in_dev, &itag);
		if (err < 0)
			goto martian_source;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res->type != RTN_UNICAST)
		goto martian_destination;

	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source;
	}
	flags |= RTCF_BROADCAST;
	res->type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	/* local delivery: try the per-nexthop input cache first */
	do_cache = false;
	if (res->fi) {
		if (!itag) {
			rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
			if (rt_cache_valid(rth)) {
				skb_dst_set_noref(skb, &rth->dst);
				err = 0;
				goto out;
			}
			do_cache = true;
		}
	}

	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
			   flags | RTCF_LOCAL, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.output= ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_is_input = 1;
	if (res->table)
		rth->rt_table_id = res->table->tb_id;

	RT_CACHE_STAT_INC(in_slow_tot);
	if (res->type == RTN_UNREACHABLE) {
		rth->dst.input= ip_error;
		rth->dst.error= -err;
		rth->rt_flags 	&= ~RTCF_LOCAL;
	}

	if (do_cache) {
		struct fib_nh *nh = &FIB_RES_NH(*res);

		rth->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
			WARN_ON(rth->dst.input == lwtunnel_input);
			rth->dst.lwtstate->orig_input = rth->dst.input;
			rth->dst.input = lwtunnel_input;
		}

		if (unlikely(!rt_cache_route(nh, rth)))
			rt_add_uncached_list(rth);
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res->type = RTN_UNREACHABLE;
	res->fi = NULL;
	res->table = NULL;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}
/* Public input-route entry point: mask @tos down to the routing bits and
 * run the RCU-protected lookup.  The dst (if any) is attached to @skb
 * without an extra reference.  Returns 0 or a negative errno.
 */
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	struct fib_result res;
	int err;

	rcu_read_lock();
	err = ip_route_input_rcu(skb, daddr, saddr, tos & IPTOS_RT_MASK,
				 dev, &res);
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock held */
/* Resolve an input route: handle multicast destinations inline (group
 * membership check, optional l3-master fallback, multicast-forwarding
 * check), everything else goes to ip_route_input_slow().  Returns 0 or
 * a negative errno.
 */
int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		       u8 tos, struct net_device *dev, struct fib_result *res)
{
	/* Multicast recognition logic is moved from route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As result the host on multicasting
	   network acquires a lot of useless route cache entries, sort of
	   SDR messages from all the world. Now we try to get rid of them.
	   Really, provided software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   comparing with route cache reject entries.
	   Note, that multicast routers are not affected, because
	   route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		int our = 0;
		int err = -EINVAL;

		if (in_dev)
			our = ip_check_mc_rcu(in_dev, daddr, saddr,
					      ip_hdr(skb)->protocol);

		/* check l3 master if no match yet */
		if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
			struct in_device *l3_in_dev;

			l3_in_dev = __in_dev_get_rcu(skb->dev);
			if (l3_in_dev)
				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
						      ip_hdr(skb)->protocol);
		}

		if (our
#ifdef CONFIG_IP_MROUTE
			||
		    /* in_dev may be NULL here (device without IPv4
		     * config); guard before IN_DEV_MFORWARD dereferences
		     * it to avoid a NULL pointer dereference.
		     */
		    (!ipv4_is_local_multicast(daddr) && in_dev &&
		     IN_DEV_MFORWARD(in_dev))
#endif
		   ) {
			err = ip_route_input_mc(skb, daddr, saddr,
						tos, dev, our);
		}
		return err;
	}

	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}
/* called with rcu_read_lock() */
/* Build (or reuse a cached) output route for @fl4 on @dev_out, after the
 * caller has finished FIB resolution.  Classifies the destination
 * (broadcast/multicast/local), consults the nexthop-exception and
 * per-cpu output caches, and otherwise allocates a fresh rtable and
 * finishes it via rt_set_nexthop().  Returns an rtable or ERR_PTR.
 */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	/* loopback sources may only leave via loopback / l3 master
	 * unless route_localnet is enabled
	 */
	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) &&
		    !(dev_out->flags & IFF_LOOPBACK) &&
		    !netif_is_l3_master(dev_out))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If multicast route do not exist use
		 * default one, but do not gateway in this case.
		 * Yes, it is hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
		   (orig_oif != dev_out->ifindex)) {
		/* For local routes that require a particular output interface
		 * we do not want to cache the result.  Caching the result
		 * causes incorrect behaviour when there are multiple source
		 * addresses on the interface, the end result being that if the
		 * intended recipient is waiting on that interface for the
		 * packet he won't receive it because it will be delivered on
		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
		 * be set to the loopback interface as well.
		 */
		fi = NULL;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (do_cache) {
		struct rtable __rcu **prth;
		struct fib_nh *nh = &FIB_RES_NH(*res);

		fnhe = find_exception(nh, fl4->daddr);
		if (fnhe) {
			prth = &fnhe->fnhe_rth_output;
			rth = rcu_dereference(*prth);
			/* expired exception: remove and fall through to
			 * the per-cpu cache
			 */
			if (rth && rth->dst.expires &&
			    time_after(jiffies, rth->dst.expires)) {
				ip_del_fnhe(nh, fl4->daddr);
				fnhe = NULL;
			} else {
				goto rt_cache;
			}
		}

		if (unlikely(fl4->flowi4_flags &
			     FLOWI_FLAG_KNOWN_NH &&
			     !(nh->nh_gw &&
			       nh->nh_scope == RT_SCOPE_LINK))) {
			do_cache = false;
			goto add;
		}
		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
		rth = rcu_dereference(*prth);

rt_cache:
		/* reuse only if we can also take a reference */
		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
			return rth;
	}

add:
	rth = rt_dst_alloc(dev_out, flags, type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->rt_iif = orig_oif ? : 0;
	if (res->table)
		rth->rt_table_id = res->table->tb_id;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
	set_lwt_redirect(rth);

	return rth;
}
/*
 * Major route resolver routine.
 */
/* Normalize the flow's tos/scope fields (RTO_ONLINK selects link scope)
 * and run the RCU-protected output lookup.  Returns an rtable or
 * ERR_PTR.
 */
struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
					const struct sk_buff *skb)
{
	__u8 tos = RT_FL_TOS(fl4);
	struct fib_result res;
	struct rtable *rth;

	res.tclassid	= 0;
	res.fi		= NULL;
	res.table	= NULL;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
	rcu_read_unlock();

	return rth;
}
EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
/* Core output route resolution (caller holds rcu_read_lock).  Validates
 * the requested source address, resolves the output device from oif or
 * the FIB lookup, fills in missing saddr/daddr, and hands off to
 * __mkroute_output().  May modify @fl4 (oif/saddr/daddr).  Returns an
 * rtable or ERR_PTR.
 */
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
					    struct fib_result *res,
					    const struct sk_buff *skb)
{
	struct net_device *dev_out = NULL;
	int orig_oif = fl4->flowi4_oif;
	unsigned int flags = 0;
	struct rtable *rth;
	int err = -ENETUNREACH;

	if (fl4->saddr) {
		rth = ERR_PTR(-EINVAL);
		/* source must be a unicast address */
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr))
			goto out;

		/* I removed check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (!dev_out)
				goto out;

			/* Special hack: user can direct multicasts
			   and limited broadcast via necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic,vat and friends to work.
			   They bind socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of routing cache they are broken,
			   because we are not allowed to build multicast path
			   with loopback source addr (look, routing cache
			   cannot know, that ttl is zero, so that packet
			   will not leave this host and route is valid).
			   Luckily, this hack is good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}


	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (!dev_out)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr) ||
		    fl4->flowi4_proto == IPPROTO_IGMP) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		/* no destination: loop the packet back to ourselves */
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res->type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	err = fib_lookup(net, fl4, res, 0);
	if (err) {
		res->fi = NULL;
		res->table = NULL;
		if (fl4->flowi4_oif &&
		    (ipv4_is_multicast(fl4->daddr) ||
		    !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
			/* Apparently, routing tables are wrong. Assume,
			   that the destination is on link.

			   WHY? DW.
			   Because we are allowed to send to iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if destination is gatewayed, rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send packet, ignoring both routing tables
			   and ifaddr state. --ANK


			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res->type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(err);
		goto out;
	}

	if (res->type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res->fi->fib_prefsrc)
				fl4->saddr = res->fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}

		/* L3 master device is the loopback for that domain */
		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
			net->loopback_dev;
		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	fib_select_path(net, res, fl4, skb);

	dev_out = FIB_RES_DEV(*res);
	fl4->flowi4_oif = dev_out->ifindex;


make_route:
	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);

out:
	return rth;
}
/* dst_ops.check for blackhole routes: always report the dst as invalid
 * so callers re-resolve instead of reusing it.
 */
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}
/* dst_ops.mtu for blackhole routes: use the cached RTAX_MTU metric,
 * falling back to the device MTU when no metric is set.
 */
static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	if (!mtu)
		mtu = dst->dev->mtu;
	return mtu;
}
/* dst_ops.update_pmtu for blackhole routes: intentionally a no-op. */
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}
/* dst_ops.redirect for blackhole routes: intentionally a no-op. */
static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}
/* dst_ops.cow_metrics for blackhole routes: metrics are never made
 * writable; always fail.
 */
static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}
/* dst_ops for blackhole routes (see ipv4_blackhole_route()): discard
 * traffic while still answering metric/neighbour queries sanely.
 */
static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			=	AF_INET,
	.check			=	ipv4_blackhole_dst_check,
	.mtu			=	ipv4_blackhole_mtu,
	.default_advmss		=	ipv4_default_advmss,
	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
	.redirect		=	ipv4_rt_blackhole_redirect,
	.cow_metrics		=	ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		=	ipv4_neigh_lookup,
};
/* Clone @dst_orig into a blackhole route that silently discards all
 * traffic (input/output -> dst_discard*), copying the routing metadata
 * from the original.  Consumes the reference on @dst_orig.  Returns the
 * new dst or ERR_PTR(-ENOMEM).
 */
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		new->dev = net->loopback_dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;

		rt->rt_genid = rt_genid_ipv4(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_uses_gateway = ort->rt_uses_gateway;

		INIT_LIST_HEAD(&rt->rt_uncached);
	}

	/* the original reference is always consumed */
	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
/* Resolve an output route for @flp4 and, when a protocol is specified,
 * run it through the xfrm (IPsec) lookup so policy transformations
 * apply.  Returns an rtable or ERR_PTR.
 */
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    const struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
							flowi4_to_flowi(flp4),
							sk, 0);

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
/* called with rcu_read_lock held */
/* Serialize the route attached to @skb into an RTM_NEWROUTE netlink
 * message (rtmsg header + RTA_* attributes): addresses, oif/iif,
 * gateway, metrics, mark, uid, cache info and error.  Returns 0 or
 * -EMSGSIZE when the message does not fit.
 */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
			struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
			u32 seq)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= fl4->flowi4_tos;
	/* table ids above 255 do not fit the legacy u8 field */
	r->rtm_table	= table_id < 256 ? table_id : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table_id))
		goto nla_put_failure;
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
		r->rtm_flags |= RTCF_DOREDIRECT;

	if (nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_in_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (!rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_uses_gateway &&
	    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
		goto nla_put_failure;

	/* report remaining lifetime, clamped at zero once expired */
	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4->flowi4_mark &&
	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
		goto nla_put_failure;

	if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
	    nla_put_u32(skb, RTA_UID,
			from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
		goto nla_put_failure;

	error = rt->dst.error;

	if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
			int err = ipmr_get_route(net, skb,
						 fl4->saddr, fl4->daddr,
						 r, portid);

			if (err <= 0) {
				if (err == 0)
					return 0;
				goto nla_put_failure;
			}
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
				goto nla_put_failure;
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* RTM_GETROUTE handler: parse the request, perform an input lookup
 * (when RTA_IIF is given, via a synthetic skb carrying a minimal IP
 * header) or an output lookup, then reply with either the FIB entry
 * (RTM_F_FIB_MATCH) or the resolved route (rt_fill_info).  Returns 0 or
 * a negative errno.
 */
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct fib_result res = {};
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;
	u32 table_id = RT_TABLE_MAIN;
	kuid_t uid;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy,
			  extack);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through good chunk of routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
	if (tb[RTA_UID])
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
	else
		uid = (iif ? INVALID_UID : current_uid());

	/* Bugfix: need to give ip_route_input enough of an IP header to
	 * not gag.
	 */
	ip_hdr(skb)->protocol = IPPROTO_UDP;
	ip_hdr(skb)->saddr = src;
	ip_hdr(skb)->daddr = dst;

	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;

	rcu_read_lock();

	if (iif) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		skb->mark	= mark;
		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
					 dev, &res);

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
		else
			skb_dst_set(skb, &rt->dst);
	}

	if (err)
		goto errout_free;

	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
		table_id = rt->rt_table_id;

	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
		if (!res.fi) {
			/* no fib_info: translate the result type itself */
			err = fib_props[res.type].error;
			if (!err)
				err = -EHOSTUNREACH;
			goto errout_free;
		}
		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
				    rt->rt_type, res.prefix, res.prefixlen,
				    fl4.flowi4_tos, res.fi, 0);
	} else {
		err = rt_fill_info(net, dst, src, table_id, &fl4, skb,
				   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
	}
	if (err < 0)
		goto errout_free;

	rcu_read_unlock();

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;

errout_free:
	rcu_read_unlock();
	kfree_skb(skb);
	goto errout;
}
/* Multicast configuration changed on @in_dev: flush the route cache of
 * its namespace so stale multicast routes are discarded.
 */
void ip_rt_multicast_event(struct in_device *in_dev)
{
	struct net *net = dev_net(in_dev->dev);

	rt_cache_flush(net);
}
#ifdef CONFIG_SYSCTL
/* Tunables exposed through the ipv4_route_table sysctls below. */
static int ip_rt_gc_interval __read_mostly  = 60 * HZ;	/* gc_interval */
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;	/* gc_min_interval[_ms] */
static int ip_rt_gc_elasticity __read_mostly	= 8;	/* gc_elasticity */
/* sysctl handler for route-cache flush: any write flushes the cache and
 * bumps the fnhe generation id of the owning namespace; reads are
 * rejected with -EINVAL.
 */
static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (!write)
		return -EINVAL;

	rt_cache_flush(net);
	fnhe_genid_bump(net);
	return 0;
}
/*
 * Global (not per-namespace) routing tunables exported under
 * /proc/sys/net/ipv4/route/.  Handlers are the generic integer ones;
 * *_jiffies / *_ms_jiffies variants convert units on read/write.
 */
static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		/* Same backing variable as above, but in milliseconds. */
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }	/* sentinel */
};
/*
 * Template for the per-namespace write-only "flush" control.
 * .data is unused; the handler gets the owning netns via .extra1,
 * which sysctl_route_net_init() fills in (after kmemdup for non-init
 * namespaces).
 */
static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },	/* sentinel */
};
/*
 * Register the per-namespace route sysctls.  Namespaces other than
 * init_net get their own copy of the flush table so each can carry its
 * own ->extra1 back-pointer; the entry is hidden from namespaces not
 * owned by the initial user namespace.  Returns 0 or -ENOMEM.
 */
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			tbl[0].procname = NULL;
	}
	tbl[0].extra1 = net;	/* handler needs the owning netns */

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

err_reg:
	/* Only free a duplicated table, never the static template. */
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}
/*
 * Tear down the per-namespace route sysctls and release the table that
 * sysctl_route_net_init() duplicated for this namespace.
 */
static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl = net->ipv4.route_hdr->ctl_table_arg;

	unregister_net_sysctl_table(net->ipv4.route_hdr);
	/* The static template must never reach kfree(). */
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}
/* Per-netns lifetime hooks for the route sysctls. */
static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif
/*
 * Initialise the per-namespace generation counters used to invalidate
 * cached routes and fib exceptions.  dev_addr_genid starts at a random
 * value.  Always succeeds.
 */
static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
	return 0;
}
/* Per-netns hook: generation counters need init only, no teardown. */
static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};
/*
 * Allocate and initialise the per-namespace inetpeer tree root.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *base;

	base = kmalloc(sizeof(*base), GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	inet_peer_base_init(base);
	net->ipv4.peers = base;
	return 0;
}
/*
 * Destroy the per-namespace inetpeer tree.  The pointer is cleared
 * before the tree is invalidated and freed.
 */
static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *base = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(base);
	kfree(base);
}
/* Per-netns lifetime hooks for the inetpeer storage. */
static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init	= ipv4_inetpeer_init,
	.exit	= ipv4_inetpeer_exit,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
/* Per-cpu routing-class accounting array, allocated in ip_rt_init(). */
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
/*
 * Boot-time initialisation of the IPv4 routing layer: IP-ID generator
 * arrays, per-cpu uncached route lists, the dst slab cache and entry
 * counters, proc files, the RTM_GETROUTE handler and the per-netns
 * subsystems registered above.  Allocation failures here are fatal.
 */
int __init ip_rt_init(void)
{
	int rc = 0;
	int cpu;

	/* Backing store for the IP ID generator; seeded with random data. */
	ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Blackhole dsts share the same slab cache. */
	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	/* Unlimited thresholds: dst GC pressure is effectively disabled. */
	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return rc;
}
#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	/* Register the global route tunables for the initial namespace. */
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* Copyright (c) 2013 Red Hat, Inc.
* All Rights Reserved.
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_attr_sf.h"
#include "xfs_attr_remote.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
#include "xfs_dir2.h"
#include "xfs_log.h"
/*
* xfs_attr_leaf.c
*
* Routines to implement leaf blocks of attributes as Btrees of hashed names.
*/
/*========================================================================
* Function prototypes for the kernel.
*========================================================================*/
/*
* Routines used for growing the Btree.
*/
STATIC int xfs_attr3_leaf_create(struct xfs_da_args *args,
xfs_dablk_t which_block, struct xfs_buf **bpp);
STATIC int xfs_attr3_leaf_add_work(struct xfs_buf *leaf_buffer,
struct xfs_attr3_icleaf_hdr *ichdr,
struct xfs_da_args *args, int freemap_index);
STATIC void xfs_attr3_leaf_compact(struct xfs_da_args *args,
struct xfs_attr3_icleaf_hdr *ichdr,
struct xfs_buf *leaf_buffer);
STATIC void xfs_attr3_leaf_rebalance(xfs_da_state_t *state,
xfs_da_state_blk_t *blk1,
xfs_da_state_blk_t *blk2);
STATIC int xfs_attr3_leaf_figure_balance(xfs_da_state_t *state,
xfs_da_state_blk_t *leaf_blk_1,
struct xfs_attr3_icleaf_hdr *ichdr1,
xfs_da_state_blk_t *leaf_blk_2,
struct xfs_attr3_icleaf_hdr *ichdr2,
int *number_entries_in_blk1,
int *number_usedbytes_in_blk1);
/*
* Utility routines.
*/
STATIC void xfs_attr3_leaf_moveents(struct xfs_da_args *args,
struct xfs_attr_leafblock *src_leaf,
struct xfs_attr3_icleaf_hdr *src_ichdr, int src_start,
struct xfs_attr_leafblock *dst_leaf,
struct xfs_attr3_icleaf_hdr *dst_ichdr, int dst_start,
int move_count);
STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
/*
* attr3 block 'firstused' conversion helpers.
*
* firstused refers to the offset of the first used byte of the nameval region
* of an attr leaf block. The region starts at the tail of the block and expands
* backwards towards the middle. As such, firstused is initialized to the block
* size for an empty leaf block and is reduced from there.
*
* The attr3 block size is pegged to the fsb size and the maximum fsb is 64k.
* The in-core firstused field is 32-bit and thus supports the maximum fsb size.
* The on-disk field is only 16-bit, however, and overflows at 64k. Since this
* only occurs at exactly 64k, we use zero as a magic on-disk value to represent
* the attr block size. The following helpers manage the conversion between the
* in-core and on-disk formats.
*/
/*
 * Expand the on-disk 16-bit 'firstused' field (v4 or v5 header) into
 * the 32-bit in-core value, mapping the XFS_ATTR3_LEAF_NULLOFF magic
 * back to the full attr block size.
 */
static void
xfs_attr3_leaf_firstused_from_disk(
	struct xfs_da_geometry		*geo,
	struct xfs_attr3_icleaf_hdr	*to,
	struct xfs_attr_leafblock	*from)
{
	struct xfs_attr3_leaf_hdr *hdr3;

	/* v5 (CRC) blocks keep the field in the larger v3 header. */
	if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) {
		hdr3 = (struct xfs_attr3_leaf_hdr *) from;
		to->firstused = be16_to_cpu(hdr3->firstused);
	} else {
		to->firstused = be16_to_cpu(from->hdr.firstused);
	}

	/*
	 * Convert from the magic fsb size value to actual blocksize. This
	 * should only occur for empty blocks when the block size overflows
	 * 16-bits.
	 */
	if (to->firstused == XFS_ATTR3_LEAF_NULLOFF) {
		ASSERT(!to->count && !to->usedbytes);
		ASSERT(geo->blksize > USHRT_MAX);
		to->firstused = geo->blksize;
	}
}
/*
 * Pack the 32-bit in-core 'firstused' value into the 16-bit on-disk
 * field, substituting the XFS_ATTR3_LEAF_NULLOFF magic when the value
 * equals a 64k block size and therefore cannot be represented.
 */
static void
xfs_attr3_leaf_firstused_to_disk(
	struct xfs_da_geometry		*geo,
	struct xfs_attr_leafblock	*to,
	struct xfs_attr3_icleaf_hdr	*from)
{
	struct xfs_attr3_leaf_hdr *hdr3;
	uint32_t		firstused;

	/* magic value should only be seen on disk */
	ASSERT(from->firstused != XFS_ATTR3_LEAF_NULLOFF);

	/*
	 * Scale down the 32-bit in-core firstused value to the 16-bit on-disk
	 * value. This only overflows at the max supported value of 64k. Use the
	 * magic on-disk value to represent block size in this case.
	 */
	firstused = from->firstused;
	if (firstused > USHRT_MAX) {
		ASSERT(from->firstused == geo->blksize);
		firstused = XFS_ATTR3_LEAF_NULLOFF;
	}

	/* Write into whichever header format this block carries. */
	if (from->magic == XFS_ATTR3_LEAF_MAGIC) {
		hdr3 = (struct xfs_attr3_leaf_hdr *) to;
		hdr3->firstused = cpu_to_be16(firstused);
	} else {
		to->hdr.firstused = cpu_to_be16(firstused);
	}
}
/*
 * Unpack an on-disk attr leaf header (v4 or v5 format, selected by the
 * magic number) into the format-independent in-core header.
 */
void
xfs_attr3_leaf_hdr_from_disk(
	struct xfs_da_geometry		*geo,
	struct xfs_attr3_icleaf_hdr	*to,
	struct xfs_attr_leafblock	*from)
{
	int	i;

	ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
	       from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));

	if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) {
		struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)from;

		to->forw = be32_to_cpu(hdr3->info.hdr.forw);
		to->back = be32_to_cpu(hdr3->info.hdr.back);
		to->magic = be16_to_cpu(hdr3->info.hdr.magic);
		to->count = be16_to_cpu(hdr3->count);
		to->usedbytes = be16_to_cpu(hdr3->usedbytes);
		xfs_attr3_leaf_firstused_from_disk(geo, to, from);
		to->holes = hdr3->holes;

		for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
			to->freemap[i].base = be16_to_cpu(hdr3->freemap[i].base);
			to->freemap[i].size = be16_to_cpu(hdr3->freemap[i].size);
		}
		return;
	}
	/* v4 (non-CRC) header layout. */
	to->forw = be32_to_cpu(from->hdr.info.forw);
	to->back = be32_to_cpu(from->hdr.info.back);
	to->magic = be16_to_cpu(from->hdr.info.magic);
	to->count = be16_to_cpu(from->hdr.count);
	to->usedbytes = be16_to_cpu(from->hdr.usedbytes);
	xfs_attr3_leaf_firstused_from_disk(geo, to, from);
	to->holes = from->hdr.holes;

	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		to->freemap[i].base = be16_to_cpu(from->hdr.freemap[i].base);
		to->freemap[i].size = be16_to_cpu(from->hdr.freemap[i].size);
	}
}
/*
 * Pack the in-core attr leaf header back into on-disk format; the
 * in-core magic selects whether a v4 or v5 layout is written.
 */
void
xfs_attr3_leaf_hdr_to_disk(
	struct xfs_da_geometry		*geo,
	struct xfs_attr_leafblock	*to,
	struct xfs_attr3_icleaf_hdr	*from)
{
	int	i;

	ASSERT(from->magic == XFS_ATTR_LEAF_MAGIC ||
	       from->magic == XFS_ATTR3_LEAF_MAGIC);

	if (from->magic == XFS_ATTR3_LEAF_MAGIC) {
		struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)to;

		hdr3->info.hdr.forw = cpu_to_be32(from->forw);
		hdr3->info.hdr.back = cpu_to_be32(from->back);
		hdr3->info.hdr.magic = cpu_to_be16(from->magic);
		hdr3->count = cpu_to_be16(from->count);
		hdr3->usedbytes = cpu_to_be16(from->usedbytes);
		xfs_attr3_leaf_firstused_to_disk(geo, to, from);
		hdr3->holes = from->holes;
		hdr3->pad1 = 0;		/* keep on-disk padding deterministic */

		for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
			hdr3->freemap[i].base = cpu_to_be16(from->freemap[i].base);
			hdr3->freemap[i].size = cpu_to_be16(from->freemap[i].size);
		}
		return;
	}
	/* v4 (non-CRC) header layout. */
	to->hdr.info.forw = cpu_to_be32(from->forw);
	to->hdr.info.back = cpu_to_be32(from->back);
	to->hdr.info.magic = cpu_to_be16(from->magic);
	to->hdr.count = cpu_to_be16(from->count);
	to->hdr.usedbytes = cpu_to_be16(from->usedbytes);
	xfs_attr3_leaf_firstused_to_disk(geo, to, from);
	to->hdr.holes = from->holes;
	to->hdr.pad1 = 0;

	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		to->hdr.freemap[i].base = cpu_to_be16(from->freemap[i].base);
		to->hdr.freemap[i].size = cpu_to_be16(from->freemap[i].size);
	}
}
/*
 * Structural sanity checks for an attr leaf block.  Returns the address
 * of the failing check (for corruption reporting) or NULL if the block
 * passes.
 */
static xfs_failaddr_t
xfs_attr3_leaf_verify(
	struct xfs_buf			*bp)
{
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_mount		*mp = bp->b_target->bt_mount;
	struct xfs_attr_leafblock	*leaf = bp->b_addr;
	struct xfs_perag		*pag = bp->b_pag;
	struct xfs_attr_leaf_entry	*entries;

	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_node_hdr *hdr3 = bp->b_addr;

		/* v5 blocks are self-describing: magic, uuid, blkno, lsn. */
		if (ichdr.magic != XFS_ATTR3_LEAF_MAGIC)
			return __this_address;

		if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid))
			return __this_address;
		if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
			return __this_address;
		if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->info.lsn)))
			return __this_address;
	} else {
		if (ichdr.magic != XFS_ATTR_LEAF_MAGIC)
			return __this_address;
	}
	/*
	 * In recovery there is a transient state where count == 0 is valid
	 * because we may have transitioned an empty shortform attr to a leaf
	 * if the attr didn't fit in shortform.
	 */
	if (pag && pag->pagf_init && ichdr.count == 0)
		return __this_address;

	/*
	 * firstused is the block offset of the first name info structure.
	 * Make sure it doesn't go off the block or crash into the header.
	 */
	if (ichdr.firstused > mp->m_attr_geo->blksize)
		return __this_address;
	if (ichdr.firstused < xfs_attr3_leaf_hdr_size(leaf))
		return __this_address;

	/* Make sure the entries array doesn't crash into the name info. */
	entries = xfs_attr3_leaf_entryp(bp->b_addr);
	if ((char *)&entries[ichdr.count] >
	    (char *)bp->b_addr + ichdr.firstused)
		return __this_address;

	/* XXX: need to range check rest of attr header values */
	/* XXX: hash order check? */

	return NULL;
}
/*
 * Write verifier: re-run the structural checks, then (on CRC-enabled
 * filesystems) stamp the LSN of the last logged modification into the
 * v5 header and recompute the block checksum before I/O.
 */
static void
xfs_attr3_leaf_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr;
	xfs_failaddr_t		fa;

	fa = xfs_attr3_leaf_verify(bp);
	if (fa) {
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
		return;
	}

	/* Non-CRC filesystems carry neither an LSN nor a checksum. */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	if (bip)
		hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);

	xfs_buf_update_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF);
}
/*
* leaf/node format detection on trees is sketchy, so a node read can be done on
* leaf level blocks when detection identifies the tree as a node format tree
* incorrectly. In this case, we need to swap the verifier to match the correct
* format of the block being read.
*/
static void
xfs_attr3_leaf_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	xfs_failaddr_t		fa;

	/* On CRC filesystems a bad checksum fails the read outright. */
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF)) {
		xfs_verifier_error(bp, -EFSBADCRC, __this_address);
		return;
	}

	/* Checksum fine (or not applicable): run the structural checks. */
	fa = xfs_attr3_leaf_verify(bp);
	if (fa)
		xfs_verifier_error(bp, -EFSCORRUPTED, fa);
}
/* Buffer ops wiring the attr leaf verifiers into the buffer cache. */
const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
	.name = "xfs_attr3_leaf",
	.verify_read = xfs_attr3_leaf_read_verify,
	.verify_write = xfs_attr3_leaf_write_verify,
	.verify_struct = xfs_attr3_leaf_verify,
};
/*
 * Read an attr leaf block at @bno of @dp's attr fork through the leaf
 * verifiers and, when running inside a transaction, tag the buffer with
 * the attr-leaf log item type.  Returns 0 or a negative errno.
 */
int
xfs_attr3_leaf_read(
	struct xfs_trans	*tp,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp)
{
	int			error;

	error = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
				XFS_ATTR_FORK, &xfs_attr3_leaf_buf_ops);
	if (error)
		return error;

	if (tp && *bpp)
		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_ATTR_LEAF_BUF);
	return 0;
}
/*========================================================================
* Namespace helper routines
*========================================================================*/
/*
* If namespace bits don't match return 0.
* If all match then return 1.
*/
STATIC int
xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
{
	int	disk_nsp = XFS_ATTR_NSP_ONDISK(ondisk_flags);
	int	args_nsp = XFS_ATTR_NSP_ARGS_TO_ONDISK(arg_flags);

	return disk_nsp == args_nsp;
}
/*========================================================================
* External routines when attribute fork size < XFS_LITINO(mp).
*========================================================================*/
/*
* Query whether the requested number of additional bytes of extended
* attribute space will be able to fit inline.
*
* Returns zero if not, else the di_forkoff fork offset to be used in the
* literal area for attribute data once the new bytes have been added.
*
* di_forkoff must be 8 byte aligned, hence is stored as a >>3 value;
* special case for dev/uuid inodes, they have fixed size data forks.
*/
int
xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
{
	int			offset;
	int			minforkoff;	/* lower limit on valid forkoff locations */
	int			maxforkoff;	/* upper limit on valid forkoff locations */
	int			dsize;
	xfs_mount_t		*mp = dp->i_mount;

	/* rounded down */
	offset = (XFS_LITINO(mp, dp->i_d.di_version) - bytes) >> 3;

	/* Device inodes have a fixed-size data fork. */
	if (dp->i_d.di_format == XFS_DINODE_FMT_DEV) {
		minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		return (offset >= minforkoff) ? minforkoff : 0;
	}

	/*
	 * If the requested numbers of bytes is smaller or equal to the
	 * current attribute fork size we can always proceed.
	 *
	 * Note that if_bytes in the data fork might actually be larger than
	 * the current data fork size is due to delalloc extents. In that
	 * case either the extent count will go down when they are converted
	 * to real extents, or the delalloc conversion will take care of the
	 * literal area rebalancing.
	 */
	if (bytes <= XFS_IFORK_ASIZE(dp))
		return dp->i_d.di_forkoff;

	/*
	 * For attr2 we can try to move the forkoff if there is space in the
	 * literal area, but for the old format we are done if there is no
	 * space in the fixed attribute fork.
	 */
	if (!(mp->m_flags & XFS_MOUNT_ATTR2))
		return 0;

	dsize = dp->i_df.if_bytes;

	switch (dp->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/*
		 * If there is no attr fork and the data fork is extents,
		 * determine if creating the default attr fork will result
		 * in the extents form migrating to btree. If so, the
		 * minimum offset only needs to be the space required for
		 * the btree root.
		 */
		if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
		    xfs_default_attroffset(dp))
			dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
		break;
	case XFS_DINODE_FMT_BTREE:
		/*
		 * If we have a data btree then keep forkoff if we have one,
		 * otherwise we are adding a new attr, so then we set
		 * minforkoff to where the btree root can finish so we have
		 * plenty of room for attrs
		 */
		if (dp->i_d.di_forkoff) {
			if (offset < dp->i_d.di_forkoff)
				return 0;
			return dp->i_d.di_forkoff;
		}
		dsize = XFS_BMAP_BROOT_SPACE(mp, dp->i_df.if_broot);
		break;
	}

	/*
	 * A data fork btree root must have space for at least
	 * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
	 */
	minforkoff = max(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
	minforkoff = roundup(minforkoff, 8) >> 3;

	/* attr fork btree root can have at least this many key/ptr pairs */
	maxforkoff = XFS_LITINO(mp, dp->i_d.di_version) -
			XFS_BMDR_SPACE_CALC(MINABTPTRS);
	maxforkoff = maxforkoff >> 3;	/* rounded down */

	if (offset >= maxforkoff)
		return maxforkoff;
	if (offset >= minforkoff)
		return offset;
	return 0;
}
/*
* Switch on the ATTR2 superblock bit (implies also FEATURES2)
*/
STATIC void
xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
{
	/* Nothing to do unless attr2 is enabled but not yet on disk. */
	if (!(mp->m_flags & XFS_MOUNT_ATTR2))
		return;
	if (xfs_sb_version_hasattr2(&mp->m_sb))
		return;

	/* Re-check under the superblock lock to avoid a double update. */
	spin_lock(&mp->m_sb_lock);
	if (xfs_sb_version_hasattr2(&mp->m_sb)) {
		spin_unlock(&mp->m_sb_lock);
		return;
	}
	xfs_sb_version_addattr2(&mp->m_sb);
	spin_unlock(&mp->m_sb_lock);
	xfs_log_sb(tp);
}
/*
* Create the initial contents of a shortform attribute list.
*/
void
xfs_attr_shortform_create(xfs_da_args_t *args)
{
	xfs_attr_sf_hdr_t *hdr;
	xfs_inode_t *dp;
	xfs_ifork_t *ifp;

	trace_xfs_attr_sf_create(args);

	dp = args->dp;
	ASSERT(dp != NULL);
	ifp = dp->i_afp;
	ASSERT(ifp != NULL);
	ASSERT(ifp->if_bytes == 0);	/* fork must currently be empty */

	/* Switch an empty extents-format attr fork to inline format. */
	if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS) {
		ifp->if_flags &= ~XFS_IFEXTENTS;	/* just in case */
		dp->i_d.di_aformat = XFS_DINODE_FMT_LOCAL;
		ifp->if_flags |= XFS_IFINLINE;
	} else {
		ASSERT(ifp->if_flags & XFS_IFINLINE);
	}

	/* Allocate room for the header only; entries are added later. */
	xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK);
	hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data;
	hdr->count = 0;
	hdr->totsize = cpu_to_be16(sizeof(*hdr));
	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
}
/*
* Add a name/value pair to the shortform attribute list.
* Overflow from the inode has already been checked for.
*/
void
xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
{
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	int i, offset, size;
	xfs_mount_t *mp;
	xfs_inode_t *dp;
	xfs_ifork_t *ifp;

	trace_xfs_attr_sf_add(args);

	dp = args->dp;
	mp = dp->i_mount;
	dp->i_d.di_forkoff = forkoff;

	ifp = dp->i_afp;
	ASSERT(ifp->if_flags & XFS_IFINLINE);
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
	sfe = &sf->list[0];
	/*
	 * Walk to the end of the list; on DEBUG builds also assert that
	 * the attribute does not already exist.
	 */
	for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
#ifdef DEBUG
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		ASSERT(0);
#endif
	}

	offset = (char *)sfe - (char *)sf;
	size = XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen);
	xfs_idata_realloc(dp, size, XFS_ATTR_FORK);
	/* if_data may have moved during the realloc; recompute pointers. */
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
	sfe = (xfs_attr_sf_entry_t *)((char *)sf + offset);

	/* Fill in the new entry at the end of the list. */
	sfe->namelen = args->namelen;
	sfe->valuelen = args->valuelen;
	sfe->flags = XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
	memcpy(sfe->nameval, args->name, args->namelen);
	memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
	sf->hdr.count++;
	be16_add_cpu(&sf->hdr.totsize, size);
	xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);

	xfs_sbversion_add_attr2(mp, args->trans);
}
/*
* After the last attribute is removed revert to original inode format,
* making all literal area available to the data fork once more.
*/
void
xfs_attr_fork_remove(
	struct xfs_inode	*ip,
	struct xfs_trans	*tp)
{
	/* Free the in-core attr fork and mark the inode as having none. */
	xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	ip->i_d.di_forkoff = 0;
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;

	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT(ip->i_afp == NULL);

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
/*
* Remove an attribute from the shortform attribute list structure.
*/
int
xfs_attr_shortform_remove(xfs_da_args_t *args)
{
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	int base, size=0, end, totsize, i;
	xfs_mount_t *mp;
	xfs_inode_t *dp;

	trace_xfs_attr_sf_remove(args);

	dp = args->dp;
	mp = dp->i_mount;
	base = sizeof(xfs_attr_sf_hdr_t);
	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	sfe = &sf->list[0];
	end = sf->hdr.count;
	/* Scan for the matching entry; 'base' tracks its byte offset. */
	for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe),
					base += size, i++) {
		size = XFS_ATTR_SF_ENTSIZE(sfe);
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(sfe->nameval, args->name, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		break;
	}
	if (i == end)
		return -ENOATTR;

	/*
	 * Fix up the attribute fork data, covering the hole
	 */
	end = base + size;
	totsize = be16_to_cpu(sf->hdr.totsize);
	if (end != totsize)
		memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end);
	sf->hdr.count--;
	be16_add_cpu(&sf->hdr.totsize, -size);

	/*
	 * Fix up the start offset of the attribute fork
	 */
	totsize -= size;
	/* Last attr gone on an attr2 fs: drop the attr fork entirely. */
	if (totsize == sizeof(xfs_attr_sf_hdr_t) &&
	    (mp->m_flags & XFS_MOUNT_ATTR2) &&
	    (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
	    !(args->op_flags & XFS_DA_OP_ADDNAME)) {
		xfs_attr_fork_remove(dp, args->trans);
	} else {
		xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
		dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
		ASSERT(dp->i_d.di_forkoff);
		ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) ||
				(args->op_flags & XFS_DA_OP_ADDNAME) ||
				!(mp->m_flags & XFS_MOUNT_ATTR2) ||
				dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
		xfs_trans_log_inode(args->trans, dp,
					XFS_ILOG_CORE | XFS_ILOG_ADATA);
	}

	xfs_sbversion_add_attr2(mp, args->trans);

	return 0;
}
/*
* Look up a name in a shortform attribute list structure.
*/
/*ARGSUSED*/
int
xfs_attr_shortform_lookup(xfs_da_args_t *args)
{
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	int i;
	xfs_ifork_t *ifp;

	trace_xfs_attr_sf_lookup(args);

	ifp = args->dp->i_afp;
	ASSERT(ifp->if_flags & XFS_IFINLINE);
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;

	/* Linear scan; a match on length, name and namespace is a hit. */
	sfe = &sf->list[0];
	for (i = 0; i < sf->hdr.count;
	     sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
		if (sfe->namelen == args->namelen &&
		    memcmp(args->name, sfe->nameval, args->namelen) == 0 &&
		    xfs_attr_namesp_match(args->flags, sfe->flags))
			return -EEXIST;
	}
	return -ENOATTR;
}
/*
* Look up a name in a shortform attribute list structure.
*/
/*ARGSUSED*/
int
xfs_attr_shortform_getvalue(xfs_da_args_t *args)
{
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	int i;

	ASSERT(args->dp->i_afp->if_flags == XFS_IFINLINE);
	sf = (xfs_attr_shortform_t *)args->dp->i_afp->if_u1.if_data;
	sfe = &sf->list[0];
	for (i = 0; i < sf->hdr.count;
				sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) {
		if (sfe->namelen != args->namelen)
			continue;
		if (memcmp(args->name, sfe->nameval, args->namelen) != 0)
			continue;
		if (!xfs_attr_namesp_match(args->flags, sfe->flags))
			continue;
		/* Size-only query: report the length, copy nothing. */
		if (args->flags & ATTR_KERNOVAL) {
			args->valuelen = sfe->valuelen;
			return -EEXIST;
		}
		/* Caller's buffer too small: report the required length. */
		if (args->valuelen < sfe->valuelen) {
			args->valuelen = sfe->valuelen;
			return -ERANGE;
		}
		args->valuelen = sfe->valuelen;
		memcpy(args->value, &sfe->nameval[args->namelen],
						    args->valuelen);
		return -EEXIST;		/* found and copied */
	}
	return -ENOATTR;
}
/*
* Convert from using the shortform to the leaf. On success, return the
* buffer so that we can keep it locked until we're totally done with it.
*/
int
xfs_attr_shortform_to_leaf(
	struct xfs_da_args	*args,
	struct xfs_buf		**leaf_bp)
{
	xfs_inode_t *dp;
	xfs_attr_shortform_t *sf;
	xfs_attr_sf_entry_t *sfe;
	xfs_da_args_t nargs;
	char *tmpbuffer;
	int error, i, size;
	xfs_dablk_t blkno;
	struct xfs_buf *bp;
	xfs_ifork_t *ifp;

	trace_xfs_attr_sf_to_leaf(args);

	dp = args->dp;
	ifp = dp->i_afp;
	sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data;
	size = be16_to_cpu(sf->hdr.totsize);
	/* Stash the shortform data so it can be restored on failure. */
	tmpbuffer = kmem_alloc(size, KM_SLEEP);
	ASSERT(tmpbuffer != NULL);
	memcpy(tmpbuffer, ifp->if_u1.if_data, size);
	sf = (xfs_attr_shortform_t *)tmpbuffer;

	/* Empty the inline fork before converting it to extents format. */
	xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
	xfs_bmap_local_to_extents_empty(dp, XFS_ATTR_FORK);

	bp = NULL;
	error = xfs_da_grow_inode(args, &blkno);
	if (error) {
		/*
		 * If we hit an IO error middle of the transaction inside
		 * grow_inode(), we may have inconsistent data. Bail out.
		 */
		if (error == -EIO)
			goto out;
		xfs_idata_realloc(dp, size, XFS_ATTR_FORK);	/* try to put */
		memcpy(ifp->if_u1.if_data, tmpbuffer, size);	/* it back */
		goto out;
	}

	ASSERT(blkno == 0);
	error = xfs_attr3_leaf_create(args, blkno, &bp);
	if (error) {
		/* xfs_attr3_leaf_create may not have instantiated a block */
		if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0))
			goto out;
		xfs_idata_realloc(dp, size, XFS_ATTR_FORK);	/* try to put */
		memcpy(ifp->if_u1.if_data, tmpbuffer, size);	/* it back */
		goto out;
	}

	/* Re-add every stashed shortform entry into the new leaf block. */
	memset((char *)&nargs, 0, sizeof(nargs));
	nargs.dp = dp;
	nargs.geo = args->geo;
	nargs.firstblock = args->firstblock;
	nargs.dfops = args->dfops;
	nargs.total = args->total;
	nargs.whichfork = XFS_ATTR_FORK;
	nargs.trans = args->trans;
	nargs.op_flags = XFS_DA_OP_OKNOENT;

	sfe = &sf->list[0];
	for (i = 0; i < sf->hdr.count; i++) {
		nargs.name = sfe->nameval;
		nargs.namelen = sfe->namelen;
		nargs.value = &sfe->nameval[nargs.namelen];
		nargs.valuelen = sfe->valuelen;
		nargs.hashval = xfs_da_hashname(sfe->nameval,
						sfe->namelen);
		nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags);
		error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */
		ASSERT(error == -ENOATTR);
		error = xfs_attr3_leaf_add(bp, &nargs);
		ASSERT(error != -ENOSPC);
		if (error)
			goto out;
		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
	}
	error = 0;
	*leaf_bp = bp;
out:
	kmem_free(tmpbuffer);
	return error;
}
/*
* Check a leaf attribute block to see if all the entries would fit into
* a shortform attribute list.
*/
int
xfs_attr_shortform_allfit(
	struct xfs_buf		*bp,
	struct xfs_inode	*dp)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr_leaf_entry *entry;
	xfs_attr_leaf_name_local_t *name_loc;
	struct xfs_attr3_icleaf_hdr leafhdr;
	int			bytes;
	int			i;
	struct xfs_mount	*mp = bp->b_target->bt_mount;

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
	entry = xfs_attr3_leaf_entryp(leaf);

	/* Accumulate the shortform size every complete local entry needs. */
	bytes = sizeof(struct xfs_attr_sf_hdr);
	for (i = 0; i < leafhdr.count; entry++, i++) {
		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;		/* don't copy partial entries */
		if (!(entry->flags & XFS_ATTR_LOCAL))
			return 0;	/* remote values can never go inline */
		name_loc = xfs_attr3_leaf_name_local(leaf, i);
		if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX)
			return 0;
		if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
			return 0;
		bytes += sizeof(struct xfs_attr_sf_entry) - 1
				+ name_loc->namelen
				+ be16_to_cpu(name_loc->valuelen);
	}
	/* -1: nothing left to copy; caller can drop the attr fork. */
	if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
	    (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
	    (bytes == sizeof(struct xfs_attr_sf_hdr)))
		return -1;
	return xfs_attr_shortform_bytesfit(dp, bytes);
}
/*
 * Verify the consistency of an inline attribute fork.  Returns the
 * address of the failing check, or NULL if the fork is well formed.
 */
xfs_failaddr_t
xfs_attr_shortform_verify(
	struct xfs_inode		*ip)
{
	struct xfs_attr_shortform	*sfp;
	struct xfs_attr_sf_entry	*sfep;
	struct xfs_attr_sf_entry	*next_sfep;
	char				*endp;
	struct xfs_ifork		*ifp;
	int				i;
	int				size;

	ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL);
	ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
	sfp = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
	size = ifp->if_bytes;

	/*
	 * Give up if the attribute is way too short.
	 */
	if (size < sizeof(struct xfs_attr_sf_hdr))
		return __this_address;

	endp = (char *)sfp + size;

	/* Check all reported entries */
	sfep = &sfp->list[0];
	for (i = 0; i < sfp->hdr.count; i++) {
		/*
		 * struct xfs_attr_sf_entry has a variable length.
		 * Check the fixed-offset parts of the structure are
		 * within the data buffer.
		 *
		 * sizeof(*sfep) already includes the one-byte nameval[1]
		 * array, so an entry ending exactly at endp is valid:
		 * the test must be '>' rather than '>=' or a minimal
		 * (namelen == 1, valuelen == 0) trailing entry would be
		 * falsely flagged as corrupt.
		 */
		if (((char *)sfep + sizeof(*sfep)) > endp)
			return __this_address;

		/* Don't allow names with known bad length. */
		if (sfep->namelen == 0)
			return __this_address;

		/*
		 * Check that the variable-length part of the structure is
		 * within the data buffer.  The next entry starts after the
		 * name component, so nextentry is an acceptable test.
		 */
		next_sfep = XFS_ATTR_SF_NEXTENTRY(sfep);
		if ((char *)next_sfep > endp)
			return __this_address;

		/*
		 * Check for unknown flags.  Short form doesn't support
		 * the incomplete or local bits, so we can use the namespace
		 * mask here.
		 */
		if (sfep->flags & ~XFS_ATTR_NSP_ONDISK_MASK)
			return __this_address;

		/*
		 * Check for invalid namespace combinations.  We only allow
		 * one namespace flag per xattr, so we can just count the
		 * bits (i.e. hweight) here.
		 */
		if (hweight8(sfep->flags & XFS_ATTR_NSP_ONDISK_MASK) > 1)
			return __this_address;

		sfep = next_sfep;
	}
	/* The entries must consume the fork exactly — no trailing slack. */
	if ((void *)sfep != (void *)endp)
		return __this_address;

	return NULL;
}
/*
* Convert a leaf attribute list to shortform attribute list
*/
int
xfs_attr3_leaf_to_shortform(
	struct xfs_buf		*bp,
	struct xfs_da_args	*args,
	int			forkoff)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr ichdr;
	struct xfs_attr_leaf_entry *entry;
	struct xfs_attr_leaf_name_local *name_loc;
	struct xfs_da_args	nargs;
	struct xfs_inode	*dp = args->dp;
	char			*tmpbuffer;
	int			error;
	int			i;

	trace_xfs_attr_leaf_to_sf(args);

	/* Work on a private copy: the leaf buffer is freed below. */
	tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP);
	if (!tmpbuffer)
		return -ENOMEM;

	memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);

	leaf = (xfs_attr_leafblock_t *)tmpbuffer;
	xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
	entry = xfs_attr3_leaf_entryp(leaf);

	/* XXX (dgc): buffer is about to be marked stale - why zero it? */
	memset(bp->b_addr, 0, args->geo->blksize);

	/*
	 * Clean out the prior contents of the attribute list.
	 */
	error = xfs_da_shrink_inode(args, 0, bp);
	if (error)
		goto out;

	/* forkoff == -1 means no attrs remain; drop the fork entirely. */
	if (forkoff == -1) {
		ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
		ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
		xfs_attr_fork_remove(dp, args->trans);
		goto out;
	}

	xfs_attr_shortform_create(args);

	/*
	 * Copy the attributes
	 */
	memset((char *)&nargs, 0, sizeof(nargs));
	nargs.geo = args->geo;
	nargs.dp = dp;
	nargs.firstblock = args->firstblock;
	nargs.dfops = args->dfops;
	nargs.total = args->total;
	nargs.whichfork = XFS_ATTR_FORK;
	nargs.trans = args->trans;
	nargs.op_flags = XFS_DA_OP_OKNOENT;

	for (i = 0; i < ichdr.count; entry++, i++) {
		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;	/* don't copy partial entries */
		if (!entry->nameidx)
			continue;
		ASSERT(entry->flags & XFS_ATTR_LOCAL);
		name_loc = xfs_attr3_leaf_name_local(leaf, i);
		nargs.name = name_loc->nameval;
		nargs.namelen = name_loc->namelen;
		nargs.value = &name_loc->nameval[nargs.namelen];
		nargs.valuelen = be16_to_cpu(name_loc->valuelen);
		nargs.hashval = be32_to_cpu(entry->hashval);
		nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags);
		xfs_attr_shortform_add(&nargs, forkoff);
	}
	error = 0;

out:
	kmem_free(tmpbuffer);
	return error;
}
/*
 * Convert from using a single leaf to a root node and a leaf.
 *
 * Allocates a new da block, copies the existing leaf (block 0) into it,
 * and turns block 0 into a single-entry node pointing at the relocated
 * leaf.  Returns 0 on success or a negative errno.
 */
int
xfs_attr3_leaf_to_node(
	struct xfs_da_args	*args)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr icleafhdr;
	struct xfs_attr_leaf_entry *entries;
	struct xfs_da_node_entry *btree;
	struct xfs_da3_icnode_hdr icnodehdr;
	struct xfs_da_intnode	*node;
	struct xfs_inode	*dp = args->dp;
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp1 = NULL;
	struct xfs_buf		*bp2 = NULL;
	xfs_dablk_t		blkno;
	int			error;

	trace_xfs_attr_leaf_to_node(args);

	error = xfs_da_grow_inode(args, &blkno);
	if (error)
		goto out;
	error = xfs_attr3_leaf_read(args->trans, dp, 0, -1, &bp1);
	if (error)
		goto out;

	error = xfs_da_get_buf(args->trans, dp, blkno, -1, &bp2, XFS_ATTR_FORK);
	if (error)
		goto out;

	/* copy leaf to new buffer, update identifiers */
	xfs_trans_buf_set_type(args->trans, bp2, XFS_BLFT_ATTR_LEAF_BUF);
	bp2->b_ops = bp1->b_ops;
	memcpy(bp2->b_addr, bp1->b_addr, args->geo->blksize);
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		/* CRC-enabled blocks self-identify; fix up the block number */
		struct xfs_da3_blkinfo *hdr3 = bp2->b_addr;
		hdr3->blkno = cpu_to_be64(bp2->b_bn);
	}
	xfs_trans_log_buf(args->trans, bp2, 0, args->geo->blksize - 1);

	/*
	 * Set up the new root node.
	 */
	error = xfs_da3_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK);
	if (error)
		goto out;
	node = bp1->b_addr;
	dp->d_ops->node_hdr_from_disk(&icnodehdr, node);
	btree = dp->d_ops->node_tree_p(node);

	leaf = bp2->b_addr;
	xfs_attr3_leaf_hdr_from_disk(args->geo, &icleafhdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	/* both on-disk, don't endian-flip twice */
	btree[0].hashval = entries[icleafhdr.count - 1].hashval;
	btree[0].before = cpu_to_be32(blkno);
	icnodehdr.count = 1;
	dp->d_ops->node_hdr_to_disk(node, &icnodehdr);
	xfs_trans_log_buf(args->trans, bp1, 0, args->geo->blksize - 1);
	error = 0;
out:
	return error;
}
/*========================================================================
* Routines used for growing the Btree.
*========================================================================*/
/*
 * Create the initial contents of a leaf attribute list
 * or a leaf in a node attribute list.
 *
 * Gets the buffer for da block @blkno in the attr fork, zeroes it, writes
 * an empty leaf header (v3 with CRC identifiers when the superblock has
 * CRCs enabled, v2 otherwise) and returns the buffer in *@bpp.
 */
STATIC int
xfs_attr3_leaf_create(
	struct xfs_da_args	*args,
	xfs_dablk_t		blkno,
	struct xfs_buf		**bpp)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr ichdr;
	struct xfs_inode	*dp = args->dp;
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_buf		*bp;
	int			error;

	trace_xfs_attr_leaf_create(args);

	error = xfs_da_get_buf(args->trans, args->dp, blkno, -1, &bp,
					    XFS_ATTR_FORK);
	if (error)
		return error;
	bp->b_ops = &xfs_attr3_leaf_buf_ops;
	xfs_trans_buf_set_type(args->trans, bp, XFS_BLFT_ATTR_LEAF_BUF);
	leaf = bp->b_addr;
	memset(leaf, 0, args->geo->blksize);

	memset(&ichdr, 0, sizeof(ichdr));
	/* empty leaf: name/value area starts at the very end of the block */
	ichdr.firstused = args->geo->blksize;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_da3_blkinfo *hdr3 = bp->b_addr;

		ichdr.magic = XFS_ATTR3_LEAF_MAGIC;

		hdr3->blkno = cpu_to_be64(bp->b_bn);
		hdr3->owner = cpu_to_be64(dp->i_ino);
		uuid_copy(&hdr3->uuid, &mp->m_sb.sb_meta_uuid);

		ichdr.freemap[0].base = sizeof(struct xfs_attr3_leaf_hdr);
	} else {
		ichdr.magic = XFS_ATTR_LEAF_MAGIC;
		ichdr.freemap[0].base = sizeof(struct xfs_attr_leaf_hdr);
	}
	/* one free region covering everything between header and block end */
	ichdr.freemap[0].size = ichdr.firstused - ichdr.freemap[0].base;

	xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr);
	xfs_trans_log_buf(args->trans, bp, 0, args->geo->blksize - 1);

	*bpp = bp;
	return 0;
}
/*
 * Split the leaf node, rebalance, then add the new entry.
 *
 * Allocates a fresh leaf for @newblk, rebalances the entries of @oldblk
 * across the two leaves, links the new block into the sibling chain, and
 * finally inserts the entry described by state->args into whichever leaf
 * the rebalance chose (state->inleaf).  Returns 0 or a negative errno.
 */
int
xfs_attr3_leaf_split(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*oldblk,
	struct xfs_da_state_blk	*newblk)
{
	xfs_dablk_t		blkno;
	int			error;

	trace_xfs_attr_leaf_split(state->args);

	/*
	 * Allocate space for a new leaf node.
	 */
	ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC);
	error = xfs_da_grow_inode(state->args, &blkno);
	if (error)
		return error;
	error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp);
	if (error)
		return error;
	newblk->blkno = blkno;
	newblk->magic = XFS_ATTR_LEAF_MAGIC;

	/*
	 * Rebalance the entries across the two leaves.
	 * NOTE: rebalance() currently depends on the 2nd block being empty.
	 */
	xfs_attr3_leaf_rebalance(state, oldblk, newblk);
	error = xfs_da3_blk_link(state, oldblk, newblk);
	if (error)
		return error;

	/*
	 * Save info on "old" attribute for "atomic rename" ops, leaf_add()
	 * modifies the index/blkno/rmtblk/rmtblkcnt fields to show the
	 * "new" attrs info.  Will need the "old" info to remove it later.
	 *
	 * Insert the "new" entry in the correct block.
	 */
	if (state->inleaf) {
		trace_xfs_attr_leaf_add_old(state->args);
		error = xfs_attr3_leaf_add(oldblk->bp, state->args);
	} else {
		trace_xfs_attr_leaf_add_new(state->args);
		error = xfs_attr3_leaf_add(newblk->bp, state->args);
	}

	/*
	 * Update last hashval in each block since we added the name.
	 */
	oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL);
	newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL);
	return error;
}
/*
 * Add a name to the leaf attribute list structure.
 *
 * First-fit search of the freemap for a region big enough for the new
 * entry; if none fits but the block has holes, the leaf is compacted and
 * the single remaining free region (freemap[0]) is tried again.  Returns 0
 * on success, -ENOSPC if the entry cannot fit even after compaction.
 */
int
xfs_attr3_leaf_add(
	struct xfs_buf		*bp,
	struct xfs_da_args	*args)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr ichdr;
	int			tablesize;
	int			entsize;
	int			sum;
	int			tmp;
	int			i;

	trace_xfs_attr_leaf_add(args);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
	ASSERT(args->index >= 0 && args->index <= ichdr.count);
	entsize = xfs_attr_leaf_newentsize(args, NULL);

	/*
	 * Search through freemap for first-fit on new name length.
	 * (may need to figure in size of entry struct too)
	 */
	tablesize = (ichdr.count + 1) * sizeof(xfs_attr_leaf_entry_t)
					+ xfs_attr3_leaf_hdr_size(leaf);
	for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE - 1; i >= 0; i--) {
		if (tablesize > ichdr.firstused) {
			/* entry table would overrun the name area; count
			 * this region toward "sum" but don't use it */
			sum += ichdr.freemap[i].size;
			continue;
		}
		if (!ichdr.freemap[i].size)
			continue;	/* no space in this map */
		tmp = entsize;
		/* a region below firstused must also absorb the new
		 * entry-table slot */
		if (ichdr.freemap[i].base < ichdr.firstused)
			tmp += sizeof(xfs_attr_leaf_entry_t);
		if (ichdr.freemap[i].size >= tmp) {
			tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, i);
			goto out_log_hdr;
		}
		sum += ichdr.freemap[i].size;
	}

	/*
	 * If there are no holes in the address space of the block,
	 * and we don't have enough freespace, then compaction will do us
	 * no good and we should just give up.
	 */
	if (!ichdr.holes && sum < entsize)
		return -ENOSPC;

	/*
	 * Compact the entries to coalesce free space.
	 * This may change the hdr->count via dropping INCOMPLETE entries.
	 */
	xfs_attr3_leaf_compact(args, &ichdr, bp);

	/*
	 * After compaction, the block is guaranteed to have only one
	 * free region, in freemap[0].  If it is not big enough, give up.
	 */
	if (ichdr.freemap[0].size < (entsize + sizeof(xfs_attr_leaf_entry_t))) {
		tmp = -ENOSPC;
		goto out_log_hdr;
	}

	tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, 0);

out_log_hdr:
	/* write the (possibly modified) incore header back and log it,
	 * even on the -ENOSPC path after compaction changed the block */
	xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr);
	xfs_trans_log_buf(args->trans, bp,
		XFS_DA_LOGRANGE(leaf, &leaf->hdr,
				xfs_attr3_leaf_hdr_size(leaf)));
	return tmp;
}
/*
 * Add a name to a leaf attribute list structure.
 *
 * Does the actual insertion after xfs_attr3_leaf_add() has chosen freemap
 * region @mapindex: opens a slot in the entry table at args->index, carves
 * the name/value space off the end of the chosen free region, fills in the
 * entry (local or remote form) and updates the incore header's freemap,
 * firstused and usedbytes accounting.  The caller logs the header.
 */
STATIC int
xfs_attr3_leaf_add_work(
	struct xfs_buf		*bp,
	struct xfs_attr3_icleaf_hdr *ichdr,
	struct xfs_da_args	*args,
	int			mapindex)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr_leaf_entry *entry;
	struct xfs_attr_leaf_name_local *name_loc;
	struct xfs_attr_leaf_name_remote *name_rmt;
	struct xfs_mount	*mp;
	int			tmp;
	int			i;

	trace_xfs_attr_leaf_add_work(args);

	leaf = bp->b_addr;
	ASSERT(mapindex >= 0 && mapindex < XFS_ATTR_LEAF_MAPSIZE);
	ASSERT(args->index >= 0 && args->index <= ichdr->count);

	/*
	 * Force open some space in the entry array and fill it in.
	 */
	entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
	if (args->index < ichdr->count) {
		/* shift the tail of the table up one slot */
		tmp  = ichdr->count - args->index;
		tmp *= sizeof(xfs_attr_leaf_entry_t);
		memmove(entry + 1, entry, tmp);
		xfs_trans_log_buf(args->trans, bp,
		    XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
	}
	ichdr->count++;

	/*
	 * Allocate space for the new string (at the end of the run).
	 */
	mp = args->trans->t_mountp;
	ASSERT(ichdr->freemap[mapindex].base < args->geo->blksize);
	ASSERT((ichdr->freemap[mapindex].base & 0x3) == 0);
	ASSERT(ichdr->freemap[mapindex].size >=
		xfs_attr_leaf_newentsize(args, NULL));
	ASSERT(ichdr->freemap[mapindex].size < args->geo->blksize);
	ASSERT((ichdr->freemap[mapindex].size & 0x3) == 0);

	/* tmp is set non-zero by newentsize() when the value fits locally */
	ichdr->freemap[mapindex].size -= xfs_attr_leaf_newentsize(args, &tmp);

	entry->nameidx = cpu_to_be16(ichdr->freemap[mapindex].base +
				     ichdr->freemap[mapindex].size);
	entry->hashval = cpu_to_be32(args->hashval);
	entry->flags = tmp ? XFS_ATTR_LOCAL : 0;
	entry->flags |= XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
	if (args->op_flags & XFS_DA_OP_RENAME) {
		entry->flags |= XFS_ATTR_INCOMPLETE;
		if ((args->blkno2 == args->blkno) &&
		    (args->index2 <= args->index)) {
			/* the "old" entry slid up one slot with the memmove */
			args->index2++;
		}
	}
	xfs_trans_log_buf(args->trans, bp,
			  XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
	ASSERT((args->index == 0) ||
	       (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval)));
	ASSERT((args->index == ichdr->count - 1) ||
	       (be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval)));

	/*
	 * For "remote" attribute values, simply note that we need to
	 * allocate space for the "remote" value.  We can't actually
	 * allocate the extents in this transaction, and we can't decide
	 * which blocks they should be as we might allocate more blocks
	 * as part of this transaction (a split operation for example).
	 */
	if (entry->flags & XFS_ATTR_LOCAL) {
		name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
		name_loc->namelen = args->namelen;
		name_loc->valuelen = cpu_to_be16(args->valuelen);
		memcpy((char *)name_loc->nameval, args->name, args->namelen);
		memcpy((char *)&name_loc->nameval[args->namelen], args->value,
				   be16_to_cpu(name_loc->valuelen));
	} else {
		name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
		name_rmt->namelen = args->namelen;
		memcpy((char *)name_rmt->name, args->name, args->namelen);
		entry->flags |= XFS_ATTR_INCOMPLETE;
		/* just in case */
		name_rmt->valuelen = 0;
		name_rmt->valueblk = 0;
		args->rmtblkno = 1;
		args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
		args->rmtvaluelen = args->valuelen;
	}
	xfs_trans_log_buf(args->trans, bp,
	     XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
				   xfs_attr_leaf_entsize(leaf, args->index)));

	/*
	 * Update the control info for this leaf node
	 */
	if (be16_to_cpu(entry->nameidx) < ichdr->firstused)
		ichdr->firstused = be16_to_cpu(entry->nameidx);

	ASSERT(ichdr->firstused >= ichdr->count * sizeof(xfs_attr_leaf_entry_t)
					+ xfs_attr3_leaf_hdr_size(leaf));
	tmp = (ichdr->count - 1) * sizeof(xfs_attr_leaf_entry_t)
					+ xfs_attr3_leaf_hdr_size(leaf);

	/* any freemap region bordering the entry table shrinks by one slot */
	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		if (ichdr->freemap[i].base == tmp) {
			ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
			ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t);
		}
	}
	ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
	return 0;
}
/*
 * Garbage collect a leaf attribute list block by copying it to a new buffer.
 *
 * Snapshots the leaf into a temporary buffer, resets the real buffer to an
 * empty leaf (preserving the raw on-disk header bytes), then moves every
 * entry back in packed order so the block ends up with a single free region
 * in freemap[0].  @ichdr_dst is updated in place; the caller must write the
 * incore header back to the buffer when done.
 */
STATIC void
xfs_attr3_leaf_compact(
	struct xfs_da_args	*args,
	struct xfs_attr3_icleaf_hdr *ichdr_dst,
	struct xfs_buf		*bp)
{
	struct xfs_attr_leafblock *leaf_src;
	struct xfs_attr_leafblock *leaf_dst;
	struct xfs_attr3_icleaf_hdr ichdr_src;
	struct xfs_trans	*trans = args->trans;
	char			*tmpbuffer;

	trace_xfs_attr_leaf_compact(args);

	tmpbuffer = kmem_alloc(args->geo->blksize, KM_SLEEP);
	memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
	memset(bp->b_addr, 0, args->geo->blksize);
	leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
	leaf_dst = bp->b_addr;

	/*
	 * Copy the on-disk header back into the destination buffer to ensure
	 * all the information in the header that is not part of the incore
	 * header structure is preserved.
	 */
	memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src));

	/* Initialise the incore headers */
	ichdr_src = *ichdr_dst;	/* struct copy */
	ichdr_dst->firstused = args->geo->blksize;
	ichdr_dst->usedbytes = 0;
	ichdr_dst->count = 0;
	ichdr_dst->holes = 0;
	ichdr_dst->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_src);
	ichdr_dst->freemap[0].size = ichdr_dst->firstused -
						ichdr_dst->freemap[0].base;

	/* write the header back to initialise the underlying buffer */
	xfs_attr3_leaf_hdr_to_disk(args->geo, leaf_dst, ichdr_dst);

	/*
	 * Copy all entry's in the same (sorted) order,
	 * but allocate name/value pairs packed and in sequence.
	 */
	xfs_attr3_leaf_moveents(args, leaf_src, &ichdr_src, 0,
				leaf_dst, ichdr_dst, 0, ichdr_src.count);
	/*
	 * this logs the entire buffer, but the caller must write the header
	 * back to the buffer when it is finished modifying it.
	 */
	xfs_trans_log_buf(trans, bp, 0, args->geo->blksize - 1);

	kmem_free(tmpbuffer);
}
/*
 * Compare two leaf blocks "order".
 * Return 0 unless leaf2 should go before leaf1.
 */
static int
xfs_attr3_leaf_order(
	struct xfs_buf			*leaf1_bp,
	struct xfs_attr3_icleaf_hdr	*leaf1hdr,
	struct xfs_buf			*leaf2_bp,
	struct xfs_attr3_icleaf_hdr	*leaf2hdr)
{
	struct xfs_attr_leaf_entry	*ents1;
	struct xfs_attr_leaf_entry	*ents2;

	/* An empty block imposes no ordering constraint. */
	if (leaf1hdr->count == 0 || leaf2hdr->count == 0)
		return 0;

	ents1 = xfs_attr3_leaf_entryp(leaf1_bp->b_addr);
	ents2 = xfs_attr3_leaf_entryp(leaf2_bp->b_addr);

	/* leaf2 sorts first if its lowest hashval is lower ... */
	if (be32_to_cpu(ents2[0].hashval) < be32_to_cpu(ents1[0].hashval))
		return 1;
	/* ... or if its highest hashval is lower. */
	if (be32_to_cpu(ents2[leaf2hdr->count - 1].hashval) <
	    be32_to_cpu(ents1[leaf1hdr->count - 1].hashval))
		return 1;
	return 0;
}
int
xfs_attr_leaf_order(
struct xfs_buf *leaf1_bp,
struct xfs_buf *leaf2_bp)
{
struct xfs_attr3_icleaf_hdr ichdr1;
struct xfs_attr3_icleaf_hdr ichdr2;
struct xfs_mount *mp = leaf1_bp->b_target->bt_mount;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr1, leaf1_bp->b_addr);
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr2, leaf2_bp->b_addr);
return xfs_attr3_leaf_order(leaf1_bp, &ichdr1, leaf2_bp, &ichdr2);
}
/*
 * Redistribute the attribute list entries between two leaf nodes,
 * taking into account the size of the new entry.
 *
 * NOTE: if new block is empty, then it will get the upper half of the
 * old block.  At present, all (one) callers pass in an empty second block.
 *
 * This code adjusts the args->index/blkno and args->index2/blkno2 fields
 * to match what it is doing in splitting the attribute leaf block.  Those
 * values are used in "atomic rename" operations on attributes.  Note that
 * the "new" and "old" values can end up in different blocks.
 */
STATIC void
xfs_attr3_leaf_rebalance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_da_state_blk	*blk2)
{
	struct xfs_da_args	*args;
	struct xfs_attr_leafblock *leaf1;
	struct xfs_attr_leafblock *leaf2;
	struct xfs_attr3_icleaf_hdr ichdr1;
	struct xfs_attr3_icleaf_hdr ichdr2;
	struct xfs_attr_leaf_entry *entries1;
	struct xfs_attr_leaf_entry *entries2;
	int			count;
	int			totallen;
	int			max;
	int			space;
	int			swap;

	/*
	 * Set up environment.
	 */
	ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC);
	ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
	leaf1 = blk1->bp->b_addr;
	leaf2 = blk2->bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr1, leaf1);
	xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr2, leaf2);
	ASSERT(ichdr2.count == 0);
	args = state->args;

	trace_xfs_attr_leaf_rebalance(args);

	/*
	 * Check ordering of blocks, reverse if it makes things simpler.
	 *
	 * NOTE: Given that all (current) callers pass in an empty
	 * second block, this code should never set "swap".
	 */
	swap = 0;
	if (xfs_attr3_leaf_order(blk1->bp, &ichdr1, blk2->bp, &ichdr2)) {
		struct xfs_da_state_blk	*tmp_blk;
		struct xfs_attr3_icleaf_hdr tmp_ichdr;

		tmp_blk = blk1;
		blk1 = blk2;
		blk2 = tmp_blk;

		/* struct copies to swap them rather than reconverting */
		tmp_ichdr = ichdr1;
		ichdr1 = ichdr2;
		ichdr2 = tmp_ichdr;

		leaf1 = blk1->bp->b_addr;
		leaf2 = blk2->bp->b_addr;
		swap = 1;
	}

	/*
	 * Examine entries until we reduce the absolute difference in
	 * byte usage between the two blocks to a minimum.  Then get
	 * the direction to copy and the number of elements to move.
	 *
	 * "inleaf" is true if the new entry should be inserted into blk1.
	 * If "swap" is also true, then reverse the sense of "inleaf".
	 */
	state->inleaf = xfs_attr3_leaf_figure_balance(state, blk1, &ichdr1,
						      blk2, &ichdr2,
						      &count, &totallen);
	if (swap)
		state->inleaf = !state->inleaf;

	/*
	 * Move any entries required from leaf to leaf:
	 */
	if (count < ichdr1.count) {
		/*
		 * Figure the total bytes to be added to the destination leaf.
		 */
		/* number entries being moved */
		count = ichdr1.count - count;
		space = ichdr1.usedbytes - totallen;
		space += count * sizeof(xfs_attr_leaf_entry_t);

		/*
		 * leaf2 is the destination, compact it if it looks tight.
		 */
		max  = ichdr2.firstused - xfs_attr3_leaf_hdr_size(leaf1);
		max -= ichdr2.count * sizeof(xfs_attr_leaf_entry_t);
		if (space > max)
			xfs_attr3_leaf_compact(args, &ichdr2, blk2->bp);

		/*
		 * Move high entries from leaf1 to low end of leaf2.
		 */
		xfs_attr3_leaf_moveents(args, leaf1, &ichdr1,
				ichdr1.count - count, leaf2, &ichdr2, 0, count);

	} else if (count > ichdr1.count) {
		/*
		 * I assert that since all callers pass in an empty
		 * second buffer, this code should never execute.
		 */
		ASSERT(0);

		/*
		 * Figure the total bytes to be added to the destination leaf.
		 */
		/* number entries being moved */
		count -= ichdr1.count;
		space  = totallen - ichdr1.usedbytes;
		space += count * sizeof(xfs_attr_leaf_entry_t);

		/*
		 * leaf1 is the destination, compact it if it looks tight.
		 */
		max  = ichdr1.firstused - xfs_attr3_leaf_hdr_size(leaf1);
		max -= ichdr1.count * sizeof(xfs_attr_leaf_entry_t);
		if (space > max)
			xfs_attr3_leaf_compact(args, &ichdr1, blk1->bp);

		/*
		 * Move low entries from leaf2 to high end of leaf1.
		 */
		xfs_attr3_leaf_moveents(args, leaf2, &ichdr2, 0, leaf1, &ichdr1,
					ichdr1.count, count);
	}

	xfs_attr3_leaf_hdr_to_disk(state->args->geo, leaf1, &ichdr1);
	xfs_attr3_leaf_hdr_to_disk(state->args->geo, leaf2, &ichdr2);
	xfs_trans_log_buf(args->trans, blk1->bp, 0, args->geo->blksize - 1);
	xfs_trans_log_buf(args->trans, blk2->bp, 0, args->geo->blksize - 1);

	/*
	 * Copy out last hashval in each block for B-tree code.
	 */
	entries1 = xfs_attr3_leaf_entryp(leaf1);
	entries2 = xfs_attr3_leaf_entryp(leaf2);
	blk1->hashval = be32_to_cpu(entries1[ichdr1.count - 1].hashval);
	blk2->hashval = be32_to_cpu(entries2[ichdr2.count - 1].hashval);

	/*
	 * Adjust the expected index for insertion.
	 * NOTE: this code depends on the (current) situation that the
	 * second block was originally empty.
	 *
	 * If the insertion point moved to the 2nd block, we must adjust
	 * the index.  We must also track the entry just following the
	 * new entry for use in an "atomic rename" operation, that entry
	 * is always the "old" entry and the "new" entry is what we are
	 * inserting.  The index/blkno fields refer to the "old" entry,
	 * while the index2/blkno2 fields refer to the "new" entry.
	 */
	if (blk1->index > ichdr1.count) {
		ASSERT(state->inleaf == 0);
		blk2->index = blk1->index - ichdr1.count;
		args->index = args->index2 = blk2->index;
		args->blkno = args->blkno2 = blk2->blkno;
	} else if (blk1->index == ichdr1.count) {
		if (state->inleaf) {
			args->index = blk1->index;
			args->blkno = blk1->blkno;
			args->index2 = 0;
			args->blkno2 = blk2->blkno;
		} else {
			/*
			 * On a double leaf split, the original attr location
			 * is already stored in blkno2/index2, so don't
			 * overwrite it overwise we corrupt the tree.
			 */
			blk2->index = blk1->index - ichdr1.count;
			args->index = blk2->index;
			args->blkno = blk2->blkno;
			if (!state->extravalid) {
				/*
				 * set the new attr location to match the old
				 * one and let the higher level split code
				 * decide where in the leaf to place it.
				 */
				args->index2 = blk2->index;
				args->blkno2 = blk2->blkno;
			}
		}
	} else {
		ASSERT(state->inleaf == 1);
		args->index = args->index2 = blk1->index;
		args->blkno = args->blkno2 = blk1->blkno;
	}
}
/*
 * Examine entries until we reduce the absolute difference in
 * byte usage between the two blocks to a minimum.
 * GROT: Is this really necessary?  With other than a 512 byte blocksize,
 * GROT: there will always be enough room in either block for a new entry.
 * GROT: Do a double-split for this case?
 *
 * On return, *countarg is the number of entries that should remain in the
 * first block (blk1) and *usedbytesarg is the name/value bytes they use.
 * The return value is non-zero when the new entry (at blk1->index) was
 * counted into the first block's share, i.e. it should be inserted there.
 */
STATIC int
xfs_attr3_leaf_figure_balance(
	struct xfs_da_state	*state,
	struct xfs_da_state_blk	*blk1,
	struct xfs_attr3_icleaf_hdr *ichdr1,
	struct xfs_da_state_blk	*blk2,
	struct xfs_attr3_icleaf_hdr *ichdr2,
	int			*countarg,
	int			*usedbytesarg)
{
	struct xfs_attr_leafblock *leaf1 = blk1->bp->b_addr;
	struct xfs_attr_leafblock *leaf2 = blk2->bp->b_addr;
	struct xfs_attr_leaf_entry *entry;
	int			count;
	int			max;
	int			index;
	int			totallen = 0;
	int			half;
	int			lastdelta;
	int			foundit = 0;
	int			tmp;

	/*
	 * Examine entries until we reduce the absolute difference in
	 * byte usage between the two blocks to a minimum.
	 */
	max = ichdr1->count + ichdr2->count;
	/* half = half of the total bytes including the entry being added */
	half = (max + 1) * sizeof(*entry);
	half += ichdr1->usedbytes + ichdr2->usedbytes +
			xfs_attr_leaf_newentsize(state->args, NULL);
	half /= 2;
	lastdelta = state->args->geo->blksize;
	entry = xfs_attr3_leaf_entryp(leaf1);
	for (count = index = 0; count < max; entry++, index++, count++) {

#define XFS_ATTR_ABS(A)	(((A) < 0) ? -(A) : (A))
		/*
		 * The new entry is in the first block, account for it.
		 */
		if (count == blk1->index) {
			tmp = totallen + sizeof(*entry) +
				xfs_attr_leaf_newentsize(state->args, NULL);
			/* stop once adding more would widen the imbalance */
			if (XFS_ATTR_ABS(half - tmp) > lastdelta)
				break;
			lastdelta = XFS_ATTR_ABS(half - tmp);
			totallen = tmp;
			foundit = 1;
		}

		/*
		 * Wrap around into the second block if necessary.
		 */
		if (count == ichdr1->count) {
			leaf1 = leaf2;
			entry = xfs_attr3_leaf_entryp(leaf1);
			index = 0;
		}

		/*
		 * Figure out if next leaf entry would be too much.
		 */
		tmp = totallen + sizeof(*entry) + xfs_attr_leaf_entsize(leaf1,
									index);
		if (XFS_ATTR_ABS(half - tmp) > lastdelta)
			break;
		lastdelta = XFS_ATTR_ABS(half - tmp);
		totallen = tmp;
#undef XFS_ATTR_ABS
	}

	/*
	 * Calculate the number of usedbytes that will end up in lower block.
	 * If new entry not in lower block, fix up the count.
	 */
	totallen -= count * sizeof(*entry);
	if (foundit) {
		totallen -= sizeof(*entry) +
				xfs_attr_leaf_newentsize(state->args, NULL);
	}

	*countarg = count;
	*usedbytesarg = totallen;
	return foundit;
}
/*========================================================================
* Routines used for shrinking the Btree.
*========================================================================*/
/*
 * Check a leaf block and its neighbors to see if the block should be
 * collapsed into one or the other neighbor.  Always keep the block
 * with the smaller block number.
 * If the current block is over 50% full, don't try to join it, return 0.
 * If the block is empty, fill in the state structure and return 2.
 * If it can be collapsed, fill in the state structure and return 1.
 * If nothing can be done, return 0.
 *
 * GROT: allow for INCOMPLETE entries in calculation.
 */
int
xfs_attr3_leaf_toosmall(
	struct xfs_da_state	*state,
	int			*action)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_da_state_blk	*blk;
	struct xfs_attr3_icleaf_hdr ichdr;
	struct xfs_buf		*bp;
	xfs_dablk_t		blkno;
	int			bytes;
	int			forward;
	int			error;
	int			retval;
	int			i;

	trace_xfs_attr_leaf_toosmall(state->args);

	/*
	 * Check for the degenerate case of the block being over 50% full.
	 * If so, it's not worth even looking to see if we might be able
	 * to coalesce with a sibling.
	 */
	blk = &state->path.blk[ state->path.active-1 ];
	leaf = blk->bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr, leaf);
	bytes = xfs_attr3_leaf_hdr_size(leaf) +
		ichdr.count * sizeof(xfs_attr_leaf_entry_t) +
		ichdr.usedbytes;
	if (bytes > (state->args->geo->blksize >> 1)) {
		*action = 0;	/* blk over 50%, don't try to join */
		return 0;
	}

	/*
	 * Check for the degenerate case of the block being empty.
	 * If the block is empty, we'll simply delete it, no need to
	 * coalesce it with a sibling block.  We choose (arbitrarily)
	 * to merge with the forward block unless it is NULL.
	 */
	if (ichdr.count == 0) {
		/*
		 * Make altpath point to the block we want to keep and
		 * path point to the block we want to drop (this one).
		 */
		forward = (ichdr.forw != 0);
		memcpy(&state->altpath, &state->path, sizeof(state->path));
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
		if (error)
			return error;
		if (retval) {
			*action = 0;
		} else {
			*action = 2;
		}
		return 0;
	}

	/*
	 * Examine each sibling block to see if we can coalesce with
	 * at least 25% free space to spare.  We need to figure out
	 * whether to merge with the forward or the backward block.
	 * We prefer coalescing with the lower numbered sibling so as
	 * to shrink an attribute list over time.
	 */
	/* start with smaller blk num */
	forward = ichdr.forw < ichdr.back;
	for (i = 0; i < 2; forward = !forward, i++) {
		struct xfs_attr3_icleaf_hdr ichdr2;
		if (forward)
			blkno = ichdr.forw;
		else
			blkno = ichdr.back;
		if (blkno == 0)
			continue;
		error = xfs_attr3_leaf_read(state->args->trans, state->args->dp,
					blkno, -1, &bp);
		if (error)
			return error;

		xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr2, bp->b_addr);

		/* would the combined blocks leave >= 25% of a block free? */
		bytes = state->args->geo->blksize -
			(state->args->geo->blksize >> 2) -
			ichdr.usedbytes - ichdr2.usedbytes -
			((ichdr.count + ichdr2.count) *
					sizeof(xfs_attr_leaf_entry_t)) -
			xfs_attr3_leaf_hdr_size(leaf);

		xfs_trans_brelse(state->args->trans, bp);
		if (bytes >= 0)
			break;	/* fits with at least 25% to spare */
	}
	if (i >= 2) {
		*action = 0;
		return 0;
	}

	/*
	 * Make altpath point to the block we want to keep (the lower
	 * numbered block) and path point to the block we want to drop.
	 */
	memcpy(&state->altpath, &state->path, sizeof(state->path));
	if (blkno < blk->blkno) {
		error = xfs_da3_path_shift(state, &state->altpath, forward,
						 0, &retval);
	} else {
		error = xfs_da3_path_shift(state, &state->path, forward,
						 0, &retval);
	}
	if (error)
		return error;
	if (retval) {
		*action = 0;
	} else {
		*action = 1;
	}
	return 0;
}
/*
 * Remove a name from the leaf attribute list structure.
 *
 * Return 1 if leaf is less than 37% full, 0 if >= 37% full.
 * If two leaves are 37% full, when combined they will leave 25% free.
 *
 * The freed name/value space is coalesced into an adjacent freemap region
 * if possible, otherwise it replaces the smallest region (when larger).
 * The entry table is then compressed over the removed slot and, if the
 * first-used byte was freed, firstused is recomputed by scanning all
 * remaining entries.
 */
int
xfs_attr3_leaf_remove(
	struct xfs_buf		*bp,
	struct xfs_da_args	*args)
{
	struct xfs_attr_leafblock *leaf;
	struct xfs_attr3_icleaf_hdr ichdr;
	struct xfs_attr_leaf_entry *entry;
	int			before;
	int			after;
	int			smallest;
	int			entsize;
	int			tablesize;
	int			tmp;
	int			i;

	trace_xfs_attr_leaf_remove(args);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);

	ASSERT(ichdr.count > 0 && ichdr.count < args->geo->blksize / 8);
	ASSERT(args->index >= 0 && args->index < ichdr.count);
	ASSERT(ichdr.firstused >= ichdr.count * sizeof(*entry) +
					xfs_attr3_leaf_hdr_size(leaf));

	entry = &xfs_attr3_leaf_entryp(leaf)[args->index];

	ASSERT(be16_to_cpu(entry->nameidx) >= ichdr.firstused);
	ASSERT(be16_to_cpu(entry->nameidx) < args->geo->blksize);

	/*
	 * Scan through free region table:
	 *    check for adjacency of free'd entry with an existing one,
	 *    find smallest free region in case we need to replace it,
	 *    adjust any map that borders the entry table,
	 */
	tablesize = ichdr.count * sizeof(xfs_attr_leaf_entry_t)
					+ xfs_attr3_leaf_hdr_size(leaf);
	tmp = ichdr.freemap[0].size;
	before = after = -1;
	smallest = XFS_ATTR_LEAF_MAPSIZE - 1;
	entsize = xfs_attr_leaf_entsize(leaf, args->index);
	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
		ASSERT(ichdr.freemap[i].base < args->geo->blksize);
		ASSERT(ichdr.freemap[i].size < args->geo->blksize);
		if (ichdr.freemap[i].base == tablesize) {
			/* region borders the entry table: it grows by the
			 * slot the removed entry will give back */
			ichdr.freemap[i].base -= sizeof(xfs_attr_leaf_entry_t);
			ichdr.freemap[i].size += sizeof(xfs_attr_leaf_entry_t);
		}

		if (ichdr.freemap[i].base + ichdr.freemap[i].size ==
				be16_to_cpu(entry->nameidx)) {
			before = i;
		} else if (ichdr.freemap[i].base ==
				(be16_to_cpu(entry->nameidx) + entsize)) {
			after = i;
		} else if (ichdr.freemap[i].size < tmp) {
			tmp = ichdr.freemap[i].size;
			smallest = i;
		}
	}

	/*
	 * Coalesce adjacent freemap regions,
	 * or replace the smallest region.
	 */
	if ((before >= 0) || (after >= 0)) {
		if ((before >= 0) && (after >= 0)) {
			/* freed space bridges two regions; merge all three */
			ichdr.freemap[before].size += entsize;
			ichdr.freemap[before].size += ichdr.freemap[after].size;
			ichdr.freemap[after].base = 0;
			ichdr.freemap[after].size = 0;
		} else if (before >= 0) {
			ichdr.freemap[before].size += entsize;
		} else {
			ichdr.freemap[after].base = be16_to_cpu(entry->nameidx);
			ichdr.freemap[after].size += entsize;
		}
	} else {
		/*
		 * Replace smallest region (if it is smaller than free'd entry)
		 */
		if (ichdr.freemap[smallest].size < entsize) {
			ichdr.freemap[smallest].base = be16_to_cpu(entry->nameidx);
			ichdr.freemap[smallest].size = entsize;
		}
	}

	/*
	 * Did we remove the first entry?
	 */
	if (be16_to_cpu(entry->nameidx) == ichdr.firstused)
		smallest = 1;
	else
		smallest = 0;

	/*
	 * Compress the remaining entries and zero out the removed stuff.
	 */
	memset(xfs_attr3_leaf_name(leaf, args->index), 0, entsize);
	ichdr.usedbytes -= entsize;
	xfs_trans_log_buf(args->trans, bp,
	     XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
				   entsize));

	tmp = (ichdr.count - args->index) * sizeof(xfs_attr_leaf_entry_t);
	memmove(entry, entry + 1, tmp);
	ichdr.count--;
	xfs_trans_log_buf(args->trans, bp,
	    XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(xfs_attr_leaf_entry_t)));

	/* clear the now-unused last slot of the entry table */
	entry = &xfs_attr3_leaf_entryp(leaf)[ichdr.count];
	memset(entry, 0, sizeof(xfs_attr_leaf_entry_t));

	/*
	 * If we removed the first entry, re-find the first used byte
	 * in the name area.  Note that if the entry was the "firstused",
	 * then we don't have a "hole" in our block resulting from
	 * removing the name.
	 */
	if (smallest) {
		tmp = args->geo->blksize;
		entry = xfs_attr3_leaf_entryp(leaf);
		for (i = ichdr.count - 1; i >= 0; entry++, i--) {
			ASSERT(be16_to_cpu(entry->nameidx) >= ichdr.firstused);
			ASSERT(be16_to_cpu(entry->nameidx) < args->geo->blksize);

			if (be16_to_cpu(entry->nameidx) < tmp)
				tmp = be16_to_cpu(entry->nameidx);
		}
		ichdr.firstused = tmp;
		ASSERT(ichdr.firstused != 0);
	} else {
		ichdr.holes = 1;	/* mark as needing compaction */
	}
	xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr);
	xfs_trans_log_buf(args->trans, bp,
			  XFS_DA_LOGRANGE(leaf, &leaf->hdr,
					  xfs_attr3_leaf_hdr_size(leaf)));

	/*
	 * Check if leaf is less than 50% full, caller may want to
	 * "join" the leaf with a sibling if so.
	 */
	tmp = ichdr.usedbytes + xfs_attr3_leaf_hdr_size(leaf) +
	      ichdr.count * sizeof(xfs_attr_leaf_entry_t);

	return tmp < args->geo->magicpct; /* leaf is < 37% full */
}
/*
 * Move all the attribute list entries from drop_leaf into save_leaf.
 *
 * drop_blk's leaf block is going away in a "join"; every entry it holds is
 * migrated into save_blk's leaf.  If the destination leaf is compact the
 * moves are done in place; otherwise the merge goes through a temporary
 * buffer so the destination ends up compacted as well.
 */
void
xfs_attr3_leaf_unbalance(
struct xfs_da_state *state,
struct xfs_da_state_blk *drop_blk,
struct xfs_da_state_blk *save_blk)
{
/*
 * Initialized once at declaration; the redundant re-assignments the old
 * code performed after the trace call have been dropped.
 */
struct xfs_attr_leafblock *drop_leaf = drop_blk->bp->b_addr;
struct xfs_attr_leafblock *save_leaf = save_blk->bp->b_addr;
struct xfs_attr3_icleaf_hdr drophdr;
struct xfs_attr3_icleaf_hdr savehdr;
struct xfs_attr_leaf_entry *entry;
trace_xfs_attr_leaf_unbalance(state->args);
xfs_attr3_leaf_hdr_from_disk(state->args->geo, &drophdr, drop_leaf);
xfs_attr3_leaf_hdr_from_disk(state->args->geo, &savehdr, save_leaf);
entry = xfs_attr3_leaf_entryp(drop_leaf);
/*
 * Save last hashval from dying block for later Btree fixup.
 */
drop_blk->hashval = be32_to_cpu(entry[drophdr.count - 1].hashval);
/*
 * Check if we need a temp buffer, or can we do it in place.
 * Note that we don't check "leaf" for holes because we will
 * always be dropping it, toosmall() decided that for us already.
 */
if (savehdr.holes == 0) {
/*
 * dest leaf has no holes, so we add there.  May need
 * to make some room in the entry array.
 */
if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
drop_blk->bp, &drophdr)) {
xfs_attr3_leaf_moveents(state->args,
drop_leaf, &drophdr, 0,
save_leaf, &savehdr, 0,
drophdr.count);
} else {
xfs_attr3_leaf_moveents(state->args,
drop_leaf, &drophdr, 0,
save_leaf, &savehdr,
savehdr.count, drophdr.count);
}
} else {
/*
 * Destination has holes, so we make a temporary copy
 * of the leaf and add them both to that.
 */
struct xfs_attr_leafblock *tmp_leaf;
struct xfs_attr3_icleaf_hdr tmphdr;
tmp_leaf = kmem_zalloc(state->args->geo->blksize, KM_SLEEP);
/*
 * Copy the header into the temp leaf so that all the stuff
 * not in the incore header is present and gets copied back in
 * once we've moved all the entries.
 */
memcpy(tmp_leaf, save_leaf, xfs_attr3_leaf_hdr_size(save_leaf));
memset(&tmphdr, 0, sizeof(tmphdr));
tmphdr.magic = savehdr.magic;
tmphdr.forw = savehdr.forw;
tmphdr.back = savehdr.back;
tmphdr.firstused = state->args->geo->blksize;
/* write the header to the temp buffer to initialise it */
xfs_attr3_leaf_hdr_to_disk(state->args->geo, tmp_leaf, &tmphdr);
if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
drop_blk->bp, &drophdr)) {
xfs_attr3_leaf_moveents(state->args,
drop_leaf, &drophdr, 0,
tmp_leaf, &tmphdr, 0,
drophdr.count);
xfs_attr3_leaf_moveents(state->args,
save_leaf, &savehdr, 0,
tmp_leaf, &tmphdr, tmphdr.count,
savehdr.count);
} else {
xfs_attr3_leaf_moveents(state->args,
save_leaf, &savehdr, 0,
tmp_leaf, &tmphdr, 0,
savehdr.count);
xfs_attr3_leaf_moveents(state->args,
drop_leaf, &drophdr, 0,
tmp_leaf, &tmphdr, tmphdr.count,
drophdr.count);
}
memcpy(save_leaf, tmp_leaf, state->args->geo->blksize);
savehdr = tmphdr; /* struct copy */
kmem_free(tmp_leaf);
}
xfs_attr3_leaf_hdr_to_disk(state->args->geo, save_leaf, &savehdr);
xfs_trans_log_buf(state->args->trans, save_blk->bp, 0,
state->args->geo->blksize - 1);
/*
 * Copy out last hashval in each block for B-tree code.
 */
entry = xfs_attr3_leaf_entryp(save_leaf);
save_blk->hashval = be32_to_cpu(entry[savehdr.count - 1].hashval);
}
/*========================================================================
* Routines used for finding things in the Btree.
*========================================================================*/
/*
* Look up a name in a leaf attribute list structure.
* This is the internal routine, it uses the caller's buffer.
*
* Note that duplicate keys are allowed, but only check within the
* current leaf node. The Btree code must check in adjacent leaf nodes.
*
* Return in args->index the index into the entry[] array of either
* the found entry, or where the entry should have been (insert before
* that entry).
*
* Don't change the args->value unless we find the attribute.
*/
int
xfs_attr3_leaf_lookup_int(
struct xfs_buf *bp,
struct xfs_da_args *args)
{
struct xfs_attr_leafblock *leaf;
struct xfs_attr3_icleaf_hdr ichdr;
struct xfs_attr_leaf_entry *entry;
struct xfs_attr_leaf_entry *entries;
struct xfs_attr_leaf_name_local *name_loc;
struct xfs_attr_leaf_name_remote *name_rmt;
xfs_dahash_t hashval;
int probe;
int span;
trace_xfs_attr_leaf_lookup(args);
leaf = bp->b_addr;
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
entries = xfs_attr3_leaf_entryp(leaf);
/*
 * Sanity-check the on-disk entry count: more entries than can possibly
 * fit in the block means the header is corrupt, so fail rather than
 * walk off the end of the buffer below.
 */
if (ichdr.count >= args->geo->blksize / 8)
return -EFSCORRUPTED;
/*
 * Binary search.  (note: small blocks will skip this loop)
 */
hashval = args->hashval;
probe = span = ichdr.count / 2;
for (entry = &entries[probe]; span > 4; entry = &entries[probe]) {
span /= 2;
if (be32_to_cpu(entry->hashval) < hashval)
probe += span;
else if (be32_to_cpu(entry->hashval) > hashval)
probe -= span;
else
break;
}
/*
 * A corrupt block can drive probe out of range, or leave us with span
 * exhausted on a mismatched hashval; report corruption instead of
 * continuing with a bogus index.
 */
if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count)))
return -EFSCORRUPTED;
if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval))
return -EFSCORRUPTED;
/*
 * Since we may have duplicate hashval's, find the first matching
 * hashval in the leaf.
 */
while (probe > 0 && be32_to_cpu(entry->hashval) >= hashval) {
entry--;
probe--;
}
while (probe < ichdr.count &&
be32_to_cpu(entry->hashval) < hashval) {
entry++;
probe++;
}
/* No entry with this hashval: probe is the insertion point. */
if (probe == ichdr.count || be32_to_cpu(entry->hashval) != hashval) {
args->index = probe;
return -ENOATTR;
}
/*
 * Duplicate keys may be present, so search all of them for a match.
 */
for (; probe < ichdr.count && (be32_to_cpu(entry->hashval) == hashval);
entry++, probe++) {
/*
 * GROT: Add code to remove incomplete entries.
 */
/*
 * If we are looking for INCOMPLETE entries, show only those.
 * If we are looking for complete entries, show only those.
 */
if ((args->flags & XFS_ATTR_INCOMPLETE) !=
(entry->flags & XFS_ATTR_INCOMPLETE)) {
continue;
}
if (entry->flags & XFS_ATTR_LOCAL) {
/* Value stored inline in this leaf block. */
name_loc = xfs_attr3_leaf_name_local(leaf, probe);
if (name_loc->namelen != args->namelen)
continue;
if (memcmp(args->name, name_loc->nameval,
args->namelen) != 0)
continue;
if (!xfs_attr_namesp_match(args->flags, entry->flags))
continue;
args->index = probe;
return -EEXIST;
} else {
/* Remote value: record where the value blocks live. */
name_rmt = xfs_attr3_leaf_name_remote(leaf, probe);
if (name_rmt->namelen != args->namelen)
continue;
if (memcmp(args->name, name_rmt->name,
args->namelen) != 0)
continue;
if (!xfs_attr_namesp_match(args->flags, entry->flags))
continue;
args->index = probe;
args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
args->rmtblkcnt = xfs_attr3_rmt_blocks(
args->dp->i_mount,
args->rmtvaluelen);
return -EEXIST;
}
}
/* Hashval matched but no name did: probe is the insertion point. */
args->index = probe;
return -ENOATTR;
}
/*
* Get the value associated with an attribute name from a leaf attribute
* list structure.
*/
int
xfs_attr3_leaf_getvalue(
struct xfs_buf *bp,
struct xfs_da_args *args)
{
struct xfs_attr_leafblock *leaf;
struct xfs_attr3_icleaf_hdr ichdr;
struct xfs_attr_leaf_entry *entry;
struct xfs_attr_leaf_name_local *name_loc;
struct xfs_attr_leaf_name_remote *name_rmt;
int valuelen;
leaf = bp->b_addr;
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
ASSERT(ichdr.count < args->geo->blksize / 8);
ASSERT(args->index < ichdr.count);
entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
if (entry->flags & XFS_ATTR_LOCAL) {
/* Value is stored inline in this leaf block. */
name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
ASSERT(name_loc->namelen == args->namelen);
ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0);
valuelen = be16_to_cpu(name_loc->valuelen);
/* ATTR_KERNOVAL: caller only wants the length, not the bytes. */
if (args->flags & ATTR_KERNOVAL) {
args->valuelen = valuelen;
return 0;
}
/* Caller's buffer is too small; tell it how big it must be. */
if (args->valuelen < valuelen) {
args->valuelen = valuelen;
return -ERANGE;
}
args->valuelen = valuelen;
memcpy(args->value, &name_loc->nameval[args->namelen], valuelen);
} else {
/*
 * Remote value: record where the value blocks live; the caller
 * does the actual remote read.
 */
name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
ASSERT(name_rmt->namelen == args->namelen);
ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
args->rmtvaluelen);
if (args->flags & ATTR_KERNOVAL) {
args->valuelen = args->rmtvaluelen;
return 0;
}
if (args->valuelen < args->rmtvaluelen) {
args->valuelen = args->rmtvaluelen;
return -ERANGE;
}
args->valuelen = args->rmtvaluelen;
}
return 0;
}
/*========================================================================
* Utility routines.
*========================================================================*/
/*
* Move the indicated entries from one leaf to another.
* NOTE: this routine modifies both source and destination leaves.
*/
/*ARGSUSED*/
/*
 * Move 'count' entries starting at index start_s in leaf_s to index
 * start_d in leaf_d.  Entry records keep their sorted order; the name/value
 * blobs are re-packed densely downward from ichdr_d->firstused.  Both the
 * source and destination incore headers are updated; only the incore state
 * is touched here — callers are responsible for logging the buffers.
 */
STATIC void
xfs_attr3_leaf_moveents(
struct xfs_da_args *args,
struct xfs_attr_leafblock *leaf_s,
struct xfs_attr3_icleaf_hdr *ichdr_s,
int start_s,
struct xfs_attr_leafblock *leaf_d,
struct xfs_attr3_icleaf_hdr *ichdr_d,
int start_d,
int count)
{
struct xfs_attr_leaf_entry *entry_s;
struct xfs_attr_leaf_entry *entry_d;
int desti;
int tmp;
int i;
/*
 * Check for nothing to do.
 */
if (count == 0)
return;
/*
 * Set up environment.
 */
ASSERT(ichdr_s->magic == XFS_ATTR_LEAF_MAGIC ||
ichdr_s->magic == XFS_ATTR3_LEAF_MAGIC);
ASSERT(ichdr_s->magic == ichdr_d->magic);
ASSERT(ichdr_s->count > 0 && ichdr_s->count < args->geo->blksize / 8);
ASSERT(ichdr_s->firstused >= (ichdr_s->count * sizeof(*entry_s))
+ xfs_attr3_leaf_hdr_size(leaf_s));
ASSERT(ichdr_d->count < args->geo->blksize / 8);
ASSERT(ichdr_d->firstused >= (ichdr_d->count * sizeof(*entry_d))
+ xfs_attr3_leaf_hdr_size(leaf_d));
ASSERT(start_s < ichdr_s->count);
ASSERT(start_d <= ichdr_d->count);
ASSERT(count <= ichdr_s->count);
/*
 * Move the entries in the destination leaf up to make a hole?
 */
if (start_d < ichdr_d->count) {
tmp = ichdr_d->count - start_d;
tmp *= sizeof(xfs_attr_leaf_entry_t);
entry_s = &xfs_attr3_leaf_entryp(leaf_d)[start_d];
entry_d = &xfs_attr3_leaf_entryp(leaf_d)[start_d + count];
memmove(entry_d, entry_s, tmp);
}
/*
 * Copy all entry's in the same (sorted) order,
 * but allocate attribute info packed and in sequence.
 */
entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
entry_d = &xfs_attr3_leaf_entryp(leaf_d)[start_d];
desti = start_d;
for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) {
ASSERT(be16_to_cpu(entry_s->nameidx) >= ichdr_s->firstused);
tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i);
#ifdef GROT
/*
 * Code to drop INCOMPLETE entries.  Difficult to use as we
 * may also need to change the insertion index.  Code turned
 * off for 6.2, should be revisited later.
 */
if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
memset(xfs_attr3_leaf_name(leaf_s, start_s + i), 0, tmp);
ichdr_s->usedbytes -= tmp;
ichdr_s->count -= 1;
entry_d--; /* to compensate for ++ in loop hdr */
desti--;
if ((start_s + i) < offset)
result++; /* insertion index adjustment */
} else {
#endif /* GROT */
/* Pack the name/value blob just below the previous one. */
ichdr_d->firstused -= tmp;
/* both on-disk, don't endian flip twice */
entry_d->hashval = entry_s->hashval;
entry_d->nameidx = cpu_to_be16(ichdr_d->firstused);
entry_d->flags = entry_s->flags;
ASSERT(be16_to_cpu(entry_d->nameidx) + tmp
<= args->geo->blksize);
memmove(xfs_attr3_leaf_name(leaf_d, desti),
xfs_attr3_leaf_name(leaf_s, start_s + i), tmp);
ASSERT(be16_to_cpu(entry_s->nameidx) + tmp
<= args->geo->blksize);
/* Scrub the moved-out bytes in the source block. */
memset(xfs_attr3_leaf_name(leaf_s, start_s + i), 0, tmp);
ichdr_s->usedbytes -= tmp;
ichdr_d->usedbytes += tmp;
ichdr_s->count -= 1;
ichdr_d->count += 1;
tmp = ichdr_d->count * sizeof(xfs_attr_leaf_entry_t)
+ xfs_attr3_leaf_hdr_size(leaf_d);
ASSERT(ichdr_d->firstused >= tmp);
#ifdef GROT
}
#endif /* GROT */
}
/*
 * Zero out the entries we just copied.
 */
if (start_s == ichdr_s->count) {
/* Moved entries came from the tail: just clear them. */
tmp = count * sizeof(xfs_attr_leaf_entry_t);
entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
ASSERT(((char *)entry_s + tmp) <=
((char *)leaf_s + args->geo->blksize));
memset(entry_s, 0, tmp);
} else {
/*
 * Move the remaining entries down to fill the hole,
 * then zero the entries at the top.
 */
tmp = (ichdr_s->count - count) * sizeof(xfs_attr_leaf_entry_t);
entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s + count];
entry_d = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
memmove(entry_d, entry_s, tmp);
tmp = count * sizeof(xfs_attr_leaf_entry_t);
entry_s = &xfs_attr3_leaf_entryp(leaf_s)[ichdr_s->count];
ASSERT(((char *)entry_s + tmp) <=
((char *)leaf_s + args->geo->blksize));
memset(entry_s, 0, tmp);
}
/*
 * Fill in the freemap information
 */
ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_d);
ichdr_d->freemap[0].base += ichdr_d->count * sizeof(xfs_attr_leaf_entry_t);
ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base;
ichdr_d->freemap[1].base = 0;
ichdr_d->freemap[2].base = 0;
ichdr_d->freemap[1].size = 0;
ichdr_d->freemap[2].size = 0;
ichdr_s->holes = 1; /* leaf may not be compact */
}
/*
* Pick up the last hashvalue from a leaf block.
*/
xfs_dahash_t
xfs_attr_leaf_lasthash(
struct xfs_buf *bp,
int *count)
{
struct xfs_attr3_icleaf_hdr ichdr;
struct xfs_attr_leaf_entry *entries;
struct xfs_mount *mp = bp->b_target->bt_mount;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, bp->b_addr);
entries = xfs_attr3_leaf_entryp(bp->b_addr);
if (count)
*count = ichdr.count;
if (!ichdr.count)
return 0;
return be32_to_cpu(entries[ichdr.count - 1].hashval);
}
/*
 * Calculate the number of bytes used to store the indicated attribute
 * (whether local or remote only calculate bytes in this block).
 */
STATIC int
xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
{
struct xfs_attr_leaf_entry *ent = &xfs_attr3_leaf_entryp(leaf)[index];
/* Local entries carry the value inline; remote ones only the name. */
if (ent->flags & XFS_ATTR_LOCAL) {
xfs_attr_leaf_name_local_t *lp =
xfs_attr3_leaf_name_local(leaf, index);
return xfs_attr_leaf_entsize_local(lp->namelen,
be16_to_cpu(lp->valuelen));
}
return xfs_attr_leaf_entsize_remote(
xfs_attr3_leaf_name_remote(leaf, index)->namelen);
}
/*
 * Calculate the number of bytes that would be required to store the new
 * attribute (whether local or remote only calculate bytes in this block).
 * This routine decides as a side effect whether the attribute will be
 * a "local" or a "remote" attribute: *local (if non-NULL) is set to 1
 * when the name+value fits inline, 0 otherwise.
 */
int
xfs_attr_leaf_newentsize(
struct xfs_da_args *args,
int *local)
{
int inline_size = xfs_attr_leaf_entsize_local(args->namelen,
args->valuelen);
int fits = inline_size <
xfs_attr_leaf_entsize_local_max(args->geo->blksize);
if (local)
*local = fits ? 1 : 0;
return fits ? inline_size :
xfs_attr_leaf_entsize_remote(args->namelen);
}
/*========================================================================
* Manage the INCOMPLETE flag in a leaf entry
*========================================================================*/
/*
* Clear the INCOMPLETE flag on an entry in a leaf block.
*/
int
xfs_attr3_leaf_clearflag(
struct xfs_da_args *args)
{
struct xfs_attr_leafblock *leaf;
struct xfs_attr_leaf_entry *entry;
struct xfs_attr_leaf_name_remote *name_rmt;
struct xfs_buf *bp;
int error;
#ifdef DEBUG
struct xfs_attr3_icleaf_hdr ichdr;
xfs_attr_leaf_name_local_t *name_loc;
int namelen;
char *name;
#endif /* DEBUG */
trace_xfs_attr_leaf_clearflag(args);
/*
 * Set up the operation.
 */
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
if (error)
return error;
leaf = bp->b_addr;
entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
ASSERT(entry->flags & XFS_ATTR_INCOMPLETE);
#ifdef DEBUG
/* Cross-check that args->index really names this attribute. */
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
ASSERT(args->index < ichdr.count);
ASSERT(args->index >= 0);
if (entry->flags & XFS_ATTR_LOCAL) {
name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
namelen = name_loc->namelen;
name = (char *)name_loc->nameval;
} else {
name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
namelen = name_rmt->namelen;
name = (char *)name_rmt->name;
}
ASSERT(be32_to_cpu(entry->hashval) == args->hashval);
ASSERT(namelen == args->namelen);
ASSERT(memcmp(name, args->name, namelen) == 0);
#endif /* DEBUG */
/* The entry is now fully written: clear INCOMPLETE and log it. */
entry->flags &= ~XFS_ATTR_INCOMPLETE;
xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
/*
 * Remote values had valueblk/valuelen zeroed while INCOMPLETE was set
 * (see xfs_attr3_leaf_setflag); restore them now.
 */
if (args->rmtblkno) {
ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
}
/*
 * Commit the flag value change and start the next trans in series.
 */
return xfs_trans_roll_inode(&args->trans, args->dp);
}
/*
* Set the INCOMPLETE flag on an entry in a leaf block.
*/
int
xfs_attr3_leaf_setflag(
struct xfs_da_args *args)
{
struct xfs_attr_leafblock *leaf;
struct xfs_attr_leaf_entry *entry;
struct xfs_attr_leaf_name_remote *name_rmt;
struct xfs_buf *bp;
int error;
#ifdef DEBUG
struct xfs_attr3_icleaf_hdr ichdr;
#endif
trace_xfs_attr_leaf_setflag(args);
/*
 * Set up the operation.
 */
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp);
if (error)
return error;
leaf = bp->b_addr;
#ifdef DEBUG
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
ASSERT(args->index < ichdr.count);
ASSERT(args->index >= 0);
#endif
/* Mark the entry as in-flight and log the change. */
entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0);
entry->flags |= XFS_ATTR_INCOMPLETE;
xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
/*
 * Zero the remote value pointers while incomplete; clearflag restores
 * them from args once the value is fully written.
 */
if ((entry->flags & XFS_ATTR_LOCAL) == 0) {
name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
name_rmt->valueblk = 0;
name_rmt->valuelen = 0;
xfs_trans_log_buf(args->trans, bp,
XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
}
/*
 * Commit the flag value change and start the next trans in series.
 */
return xfs_trans_roll_inode(&args->trans, args->dp);
}
/*
* In a single transaction, clear the INCOMPLETE flag on the leaf entry
* given by args->blkno/index and set the INCOMPLETE flag on the leaf
* entry given by args->blkno2/index2.
*
* Note that they could be in different blocks, or in the same block.
*/
int
xfs_attr3_leaf_flipflags(
struct xfs_da_args *args)
{
struct xfs_attr_leafblock *leaf1;
struct xfs_attr_leafblock *leaf2;
struct xfs_attr_leaf_entry *entry1;
struct xfs_attr_leaf_entry *entry2;
struct xfs_attr_leaf_name_remote *name_rmt;
struct xfs_buf *bp1;
struct xfs_buf *bp2;
int error;
#ifdef DEBUG
struct xfs_attr3_icleaf_hdr ichdr1;
struct xfs_attr3_icleaf_hdr ichdr2;
xfs_attr_leaf_name_local_t *name_loc;
int namelen1, namelen2;
char *name1, *name2;
#endif /* DEBUG */
trace_xfs_attr_leaf_flipflags(args);
/*
 * Read the block containing the "old" attr
 */
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, -1, &bp1);
if (error)
return error;
/*
 * Read the block containing the "new" attr, if it is different
 */
if (args->blkno2 != args->blkno) {
error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno2,
-1, &bp2);
if (error)
return error;
} else {
/* Both entries live in the same leaf block. */
bp2 = bp1;
}
leaf1 = bp1->b_addr;
entry1 = &xfs_attr3_leaf_entryp(leaf1)[args->index];
leaf2 = bp2->b_addr;
entry2 = &xfs_attr3_leaf_entryp(leaf2)[args->index2];
#ifdef DEBUG
/* Both entries must name the same attribute (same hash and name). */
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr1, leaf1);
ASSERT(args->index < ichdr1.count);
ASSERT(args->index >= 0);
xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr2, leaf2);
ASSERT(args->index2 < ichdr2.count);
ASSERT(args->index2 >= 0);
if (entry1->flags & XFS_ATTR_LOCAL) {
name_loc = xfs_attr3_leaf_name_local(leaf1, args->index);
namelen1 = name_loc->namelen;
name1 = (char *)name_loc->nameval;
} else {
name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
namelen1 = name_rmt->namelen;
name1 = (char *)name_rmt->name;
}
if (entry2->flags & XFS_ATTR_LOCAL) {
name_loc = xfs_attr3_leaf_name_local(leaf2, args->index2);
namelen2 = name_loc->namelen;
name2 = (char *)name_loc->nameval;
} else {
name_rmt = xfs_attr3_leaf_name_remote(leaf2, args->index2);
namelen2 = name_rmt->namelen;
name2 = (char *)name_rmt->name;
}
ASSERT(be32_to_cpu(entry1->hashval) == be32_to_cpu(entry2->hashval));
ASSERT(namelen1 == namelen2);
ASSERT(memcmp(name1, name2, namelen1) == 0);
#endif /* DEBUG */
ASSERT(entry1->flags & XFS_ATTR_INCOMPLETE);
ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0);
/* Old entry becomes the live one: clear INCOMPLETE and log. */
entry1->flags &= ~XFS_ATTR_INCOMPLETE;
xfs_trans_log_buf(args->trans, bp1,
XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1)));
/* Restore the remote value pointers zeroed while it was incomplete. */
if (args->rmtblkno) {
ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
xfs_trans_log_buf(args->trans, bp1,
XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
}
/* New entry becomes the in-flight one: set INCOMPLETE and log. */
entry2->flags |= XFS_ATTR_INCOMPLETE;
xfs_trans_log_buf(args->trans, bp2,
XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2)));
if ((entry2->flags & XFS_ATTR_LOCAL) == 0) {
name_rmt = xfs_attr3_leaf_name_remote(leaf2, args->index2);
name_rmt->valueblk = 0;
name_rmt->valuelen = 0;
xfs_trans_log_buf(args->trans, bp2,
XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt)));
}
/*
 * Commit the flag value change and start the next trans in series.
 */
error = xfs_trans_roll_inode(&args->trans, args->dp);
return error;
}
/*
* Copyright 2014, Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
/*
* A daemon that supports a simplified interface for writing TCMU
* handlers.
*/
#define _GNU_SOURCE
#define _BITS_UIO_H
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <errno.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <assert.h>
#include <dlfcn.h>
#include <pthread.h>
#include <signal.h>
#include <glib.h>
#include <glib-unix.h>
#include <gio/gio.h>
#include <getopt.h>
#include <poll.h>
#include <scsi/scsi.h>
#include <libkmod.h>
#include <sys/utsname.h>
#include "target_core_user_local.h"
#include "darray.h"
#include "tcmu-runner.h"
#include "tcmur_aio.h"
#include "tcmur_device.h"
#include "tcmur_cmd_handler.h"
#include "libtcmu.h"
#include "tcmuhandler-generated.h"
#include "version.h"
#include "libtcmu_config.h"
#include "libtcmu_log.h"
/* Directory scanned at startup for handler_*.so plugins (see open_handlers). */
static char *handler_path = DEFAULT_HANDLER_PATH;
/* tcmu log dir path */
extern char *tcmu_log_dir;
/* Daemon configuration; its config thread is cancelled on shutdown. */
static struct tcmu_config *tcmu_cfg;
/* All registered handlers: built-in .so plugins and DBus-registered ones. */
darray(struct tcmur_handler *) g_runner_handlers = darray_new();
/* Return the registered handler whose subtype matches, or NULL if none. */
static struct tcmur_handler *find_handler_by_subtype(gchar *subtype)
{
struct tcmur_handler **h;
darray_foreach(h, g_runner_handlers) {
if (!strcmp((*h)->subtype, subtype))
return *h;
}
return NULL;
}
/*
 * Add a handler to the global registry.  Returns 0 on success, -1 if a
 * handler with the same subtype is already registered.
 */
int tcmur_register_handler(struct tcmur_handler *handler)
{
int idx;
/* Refuse duplicate subtypes before appending. */
for (idx = 0; idx < darray_size(g_runner_handlers); idx++) {
struct tcmur_handler *existing =
darray_item(g_runner_handlers, idx);
if (strcmp(existing->subtype, handler->subtype) == 0) {
tcmu_err("Handler %s has already been registered\n",
handler->subtype);
return -1;
}
}
darray_append(g_runner_handlers, handler);
return 0;
}
/*
 * Remove a handler from the global registry.  Returns true if it was
 * found and removed, false if it was not registered.
 */
bool tcmur_unregister_handler(struct tcmur_handler *handler)
{
int idx;
for (idx = 0; idx < darray_size(g_runner_handlers); idx++) {
if (darray_item(g_runner_handlers, idx) != handler)
continue;
darray_remove(g_runner_handlers, idx);
return true;
}
return false;
}
/* scandir() filter: accept only entries whose name starts with "handler_". */
static int is_handler(const struct dirent *dirent)
{
return strncmp(dirent->d_name, "handler_", 8) == 0;
}
/*
 * Scan handler_path for handler_*.so plugins, dlopen() each one and call
 * its handler_init() entry point.  Returns the number of plugins whose
 * handler_init() returned 0, or -1 if the directory scan itself failed.
 * Failed plugins are logged and skipped; note the dlopen() handle is
 * intentionally never dlclose()d so loaded handlers stay resident.
 */
static int open_handlers(void)
{
struct dirent **dirent_list;
int num_handlers;
int num_good = 0;
int i;
num_handlers = scandir(handler_path, &dirent_list, is_handler, alphasort);
if (num_handlers == -1)
return -1;
for (i = 0; i < num_handlers; i++) {
char *path;
void *handle;
int (*handler_init)(void);
int ret;
ret = asprintf(&path, "%s/%s", handler_path, dirent_list[i]->d_name);
if (ret == -1) {
tcmu_err("ENOMEM\n");
continue;
}
handle = dlopen(path, RTLD_NOW|RTLD_LOCAL);
if (!handle) {
tcmu_err("Could not open handler at %s: %s\n", path, dlerror());
free(path);
continue;
}
/* The plugin registers itself from inside handler_init(). */
handler_init = dlsym(handle, "handler_init");
if (!handler_init) {
tcmu_err("dlsym failure on %s\n", path);
free(path);
continue;
}
ret = handler_init();
free(path);
if (ret == 0)
num_good++;
}
/* scandir() allocates each entry and the array itself. */
for (i = 0; i < num_handlers; i++)
free(dirent_list[i]);
free(dirent_list);
return num_good;
}
/*
 * GLib signal source callback: shut down the command-processing threads,
 * the log thread and the config thread, then quit the main loop passed
 * in as user_data.
 */
static gboolean sighandler(gpointer user_data)
{
tcmulib_cleanup_all_cmdproc_threads();
tcmu_cancel_log_thread();
tcmu_cancel_config_thread(tcmu_cfg);
g_main_loop_quit((GMainLoop*)user_data);
return G_SOURCE_CONTINUE;
}
/*
 * GIOChannel watch on the tcmulib master fd: hand the event to tcmulib
 * for dispatch (presumably device add/remove notifications — confirm
 * against libtcmu).  Always keeps the watch alive.
 */
gboolean tcmulib_callback(GIOChannel *source,
GIOCondition condition,
gpointer data)
{
struct tcmulib_context *ctx = data;
tcmulib_master_fd_ready(ctx);
return TRUE;
}
/* Object manager exporting per-handler TCMUService1 objects (set in dbus_bus_acquired). */
static GDBusObjectManagerServer *manager = NULL;
/*
 * DBus CheckConfig method for an in-process handler: ask the handler to
 * validate cfgstring and reply (bool ok, string reason).
 */
static gboolean
on_check_config(TCMUService1 *interface,
GDBusMethodInvocation *invocation,
gchar *cfgstring,
gpointer user_data)
{
struct tcmur_handler *handler = user_data;
char *reason = NULL;
bool str_ok = true;
/* Handlers without a check_config hook accept any config string. */
if (handler->check_config)
str_ok = handler->check_config(cfgstring, &reason);
if (str_ok)
reason = "success";
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", str_ok, reason ? : "unknown"));
/* On failure the handler allocated reason; on success it is a literal. */
if (!str_ok)
free(reason);
return TRUE;
}
/*
 * Export /org/kernel/TCMUService1/<subtype> on the bus with the given
 * CheckConfig callback wired to it; user_data for the signal is the
 * handler itself.
 */
static void
dbus_export_handler(struct tcmur_handler *handler, GCallback check_config)
{
GDBusObjectSkeleton *object;
char obj_name[128];
TCMUService1 *interface;
snprintf(obj_name, sizeof(obj_name), "/org/kernel/TCMUService1/%s",
handler->subtype);
object = g_dbus_object_skeleton_new(obj_name);
interface = tcmuservice1_skeleton_new();
g_dbus_object_skeleton_add_interface(object, G_DBUS_INTERFACE_SKELETON(interface));
g_signal_connect(interface,
"handle-check-config",
check_config,
handler); /* user_data */
tcmuservice1_set_config_desc(interface, handler->cfg_desc);
/* The manager takes its own reference; drop ours. */
g_dbus_object_manager_server_export(manager, G_DBUS_OBJECT_SKELETON(object));
g_object_unref(object);
}
/* Remove the handler's TCMUService1 object from the bus; true on success. */
static bool
dbus_unexport_handler(struct tcmur_handler *handler)
{
char path[128];
snprintf(path, sizeof(path), "/org/kernel/TCMUService1/%s",
handler->subtype);
return g_dbus_object_manager_server_unexport(manager, path) == TRUE;
}
/* Per-DBus-handler state, stored in tcmur_handler->opaque. */
struct dbus_info {
/* Watch on the handler's bus name (g_bus_watch_name id). */
guint watcher_id;
/* The RegisterHandler invocation on
 * org.kernel.TCMUService1.HandlerManager1 interface. */
GDBusMethodInvocation *register_invocation;
/* Connection to the handler's bus_name. */
GDBusConnection *connection;
};
/* Placeholder open for DBus proxy handlers: always fails with -1. */
static int dbus_handler_open(struct tcmu_device *dev)
{
return -1;
}
/* Placeholder close for DBus proxy handlers: nothing to tear down. */
static void dbus_handler_close(struct tcmu_device *dev)
{
/* nop */
}
/*
 * Placeholder command handler: must never run, since dbus_handler_open
 * always fails and therefore no device can reach command dispatch.
 */
static int dbus_handler_handle_cmd(struct tcmu_device *dev,
struct tcmulib_cmd *cmd)
{
abort();
}
/*
 * CheckConfig for a DBus-registered handler: forward the call synchronously
 * to the handler's own bus name/object and relay its reply.  On failure
 * reply (FALSE, error message).
 *
 * Fix: the GError from g_dbus_connection_call_sync() was previously leaked
 * on the failure path, and error->message was dereferenced without a NULL
 * guard.
 */
static gboolean
on_dbus_check_config(TCMUService1 *interface,
GDBusMethodInvocation *invocation,
gchar *cfgstring,
gpointer user_data)
{
char *bus_name, *obj_name;
struct tcmur_handler *handler = user_data;
GDBusConnection *connection;
GError *error = NULL;
GVariant *result;
bus_name = g_strdup_printf("org.kernel.TCMUService1.HandlerManager1.%s",
handler->subtype);
obj_name = g_strdup_printf("/org/kernel/TCMUService1/HandlerManager1/%s",
handler->subtype);
connection = g_dbus_method_invocation_get_connection(invocation);
result = g_dbus_connection_call_sync(connection,
bus_name,
obj_name,
"org.kernel.TCMUService1",
"CheckConfig",
g_variant_new("(s)", cfgstring),
NULL, G_DBUS_CALL_FLAGS_NONE, -1,
NULL, &error);
if (result) {
g_dbus_method_invocation_return_value(invocation, result);
} else {
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", FALSE,
error ? error->message : "unknown error"));
/* Was leaked before; g_clear_error handles the NULL case. */
g_clear_error(&error);
}
g_free(bus_name);
g_free(obj_name);
return TRUE;
}
/*
 * Bus-name watcher: the handler's bus name showed up, so complete the
 * pending RegisterHandler call — register the proxy handler, export its
 * TCMUService1 object, and reply success to the original invocation.
 */
static void
on_handler_appeared(GDBusConnection *connection,
const gchar *name,
const gchar *name_owner,
gpointer user_data)
{
struct tcmur_handler *handler = user_data;
struct dbus_info *info = handler->opaque;
if (info->register_invocation) {
info->connection = connection;
tcmur_register_handler(handler);
dbus_export_handler(handler, G_CALLBACK(on_dbus_check_config));
g_dbus_method_invocation_return_value(info->register_invocation,
g_variant_new("(bs)", TRUE, "succeeded"));
/* Only reply once per registration. */
info->register_invocation = NULL;
}
}
/*
 * Bus-name watcher: the handler's bus name disappeared.  Fail any
 * still-pending registration and remove the handler from the registry
 * and the bus.
 * NOTE(review): handler/info are not freed here — presumably so a
 * re-appearing name can re-register; confirm this is intentional.
 */
static void
on_handler_vanished(GDBusConnection *connection,
const gchar *name,
gpointer user_data)
{
struct tcmur_handler *handler = user_data;
struct dbus_info *info = handler->opaque;
if (info->register_invocation) {
char *reason;
reason = g_strdup_printf("Cannot find handler bus name: "
"org.kernel.TCMUService1.HandlerManager1.%s",
handler->subtype);
g_dbus_method_invocation_return_value(info->register_invocation,
g_variant_new("(bs)", FALSE, reason));
g_free(reason);
}
tcmur_unregister_handler(handler);
dbus_unexport_handler(handler);
}
/*
 * HandlerManager1.RegisterHandler: create a proxy tcmur_handler for an
 * external DBus handler.  Registration is completed asynchronously — the
 * bus-name watch fires on_handler_appeared/on_handler_vanished, and those
 * callbacks send the reply to this invocation.
 */
static gboolean
on_register_handler(TCMUService1HandlerManager1 *interface,
GDBusMethodInvocation *invocation,
gchar *subtype,
gchar *cfg_desc,
gpointer user_data)
{
struct tcmur_handler *handler;
struct dbus_info *info;
char *bus_name;
bus_name = g_strdup_printf("org.kernel.TCMUService1.HandlerManager1.%s",
subtype);
/* Proxy handler: open always fails, handle_cmd must never be reached. */
handler = g_new0(struct tcmur_handler, 1);
handler->subtype = g_strdup(subtype);
handler->cfg_desc = g_strdup(cfg_desc);
handler->open = dbus_handler_open;
handler->close = dbus_handler_close;
handler->handle_cmd = dbus_handler_handle_cmd;
/* The pending invocation is answered from the watcher callbacks. */
info = g_new0(struct dbus_info, 1);
info->register_invocation = invocation;
info->watcher_id = g_bus_watch_name(G_BUS_TYPE_SYSTEM,
bus_name,
G_BUS_NAME_WATCHER_FLAGS_NONE,
on_handler_appeared,
on_handler_vanished,
handler,
NULL);
g_free(bus_name);
handler->opaque = info;
return TRUE;
}
/*
 * HandlerManager1.UnregisterHandler: tear down a handler previously
 * registered via RegisterHandler.
 *
 * Fix (NULL-pointer dereference / invalid free): only handlers created by
 * on_register_handler carry a struct dbus_info in ->opaque and were
 * allocated with g_new0.  For in-process handlers loaded from
 * handler_*.so, ->opaque may be NULL (or point at handler-private data),
 * so the old code could crash on info->watcher_id and g_free() memory it
 * did not own.  Refuse to unregister anything that is not a DBus handler.
 */
static gboolean
on_unregister_handler(TCMUService1HandlerManager1 *interface,
GDBusMethodInvocation *invocation,
gchar *subtype,
gpointer user_data)
{
struct tcmur_handler *handler = find_handler_by_subtype(subtype);
struct dbus_info *info = handler ? handler->opaque : NULL;
if (!handler) {
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", FALSE,
"unknown subtype"));
return TRUE;
}
if (!info) {
/* Not registered through DBus: nothing here we may free or unwatch. */
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", FALSE,
"cannot unregister internal handler"));
return TRUE;
}
dbus_unexport_handler(handler);
tcmur_unregister_handler(handler);
g_bus_unwatch_name(info->watcher_id);
g_free(info);
g_free(handler);
g_dbus_method_invocation_return_value(invocation,
g_variant_new("(bs)", TRUE, "succeeded"));
return TRUE;
}
/*
 * Export /org/kernel/TCMUService1/HandlerManager1 on the given connection
 * and wire up the RegisterHandler/UnregisterHandler methods.  Export
 * failures are logged but not fatal.
 */
void dbus_handler_manager1_init(GDBusConnection *connection)
{
GError *error = NULL;
TCMUService1HandlerManager1 *interface;
gboolean ret;
interface = tcmuservice1_handler_manager1_skeleton_new();
ret = g_dbus_interface_skeleton_export(
G_DBUS_INTERFACE_SKELETON(interface),
connection,
"/org/kernel/TCMUService1/HandlerManager1",
&error);
g_signal_connect(interface,
"handle-register-handler",
G_CALLBACK (on_register_handler),
NULL);
g_signal_connect(interface,
"handle-unregister-handler",
G_CALLBACK (on_unregister_handler),
NULL);
if (!ret)
tcmu_err("Handler manager export failed: %s\n",
error ? error->message : "unknown error");
if (error)
g_error_free(error);
}
/*
 * g_bus_own_name callback: the bus connection is up.  Create the object
 * manager, export every already-loaded handler, and publish the
 * HandlerManager1 interface.
 */
static void dbus_bus_acquired(GDBusConnection *connection,
const gchar *name,
gpointer user_data)
{
struct tcmur_handler **handler;
tcmu_dbg("bus %s acquired\n", name);
manager = g_dbus_object_manager_server_new("/org/kernel/TCMUService1");
darray_foreach(handler, g_runner_handlers) {
dbus_export_handler(*handler, G_CALLBACK(on_check_config));
}
dbus_handler_manager1_init(connection);
g_dbus_object_manager_server_set_connection(manager, connection);
}
/* g_bus_own_name callback: our well-known name was granted; log only. */
static void dbus_name_acquired(GDBusConnection *connection,
const gchar *name,
gpointer user_data)
{
tcmu_dbg("name %s acquired\n", name);
}
/* g_bus_own_name callback: name could not be obtained or was lost; log only. */
static void dbus_name_lost(GDBusConnection *connection,
const gchar *name,
gpointer user_data)
{
tcmu_dbg("name lost\n");
}
/*
 * Ensure the target_core_user kernel module is available: look it up via
 * libkmod and probe-insert it unless it is builtin or already live.
 * Returns 0 on success (module present or inserted), negative on failure.
 */
static int load_our_module(void)
{
struct kmod_list *list = NULL, *itr;
struct kmod_ctx *ctx;
struct stat sb;
struct utsname u;
int ret;
ctx = kmod_new(NULL, NULL);
if (!ctx) {
tcmu_err("kmod_new() failed: %m\n");
return -1;
}
ret = kmod_module_new_from_lookup(ctx, "target_core_user", &list);
if (ret < 0) {
/* In some environments like containers, /lib/modules/`uname -r`
 * will not exist, in such cases the load module job be taken
 * care by admin, either by manual load or makesure it's builtin
 */
/* NOTE(review): relies on errno as left by the failed lookup —
 * confirm libkmod guarantees ENOENT here. */
if (ENOENT == errno) {
if (uname(&u) < 0) {
tcmu_err("uname() failed: %m\n");
} else {
tcmu_info("no modules directory '/lib/modules/%s', checking module target_core_user entry in '/sys/modules/'\n",
u.release);
/* ret becomes 0 when the module is confirmed present in sysfs. */
ret = stat("/sys/module/target_core_user", &sb);
if (!ret) {
tcmu_dbg("Module target_core_user already loaded\n");
} else {
tcmu_err("stat() on '/sys/module/target_core_user' failed: %m\n");
}
}
} else {
tcmu_err("kmod_module_new_from_lookup() failed to lookup alias target_core_use %m\n");
}
kmod_unref(ctx);
return ret;
}
if (!list) {
tcmu_err("kmod_module_new_from_lookup() failed to find module target_core_user\n");
kmod_unref(ctx);
return -ENOENT;
}
kmod_list_foreach(itr, list) {
int state, err;
struct kmod_module *mod = kmod_module_get_module(itr);
state = kmod_module_get_initstate(mod);
switch (state) {
case KMOD_MODULE_BUILTIN:
tcmu_info("Module '%s' is builtin\n",
kmod_module_get_name(mod));
break;
case KMOD_MODULE_LIVE:
tcmu_dbg("Module '%s' is already loaded\n",
kmod_module_get_name(mod));
break;
default:
/* Honors modprobe blacklists rather than force-inserting. */
err = kmod_module_probe_insert_module(mod,
KMOD_PROBE_APPLY_BLACKLIST,
NULL, NULL, NULL, NULL);
if (err == 0) {
tcmu_info("Inserted module '%s'\n",
kmod_module_get_name(mod));
} else if (err == KMOD_PROBE_APPLY_BLACKLIST) {
tcmu_err("Module '%s' is blacklisted\n",
kmod_module_get_name(mod));
} else {
tcmu_err("Failed to insert '%s'\n",
kmod_module_get_name(mod));
}
ret = err;
}
kmod_module_unref(mod);
}
kmod_module_unref_list(list);
kmod_unref(ctx);
return ret;
}
static void cmdproc_thread_cleanup(void *arg)
{
struct tcmu_device *dev = arg;
struct tcmur_handler *rhandler = tcmu_get_runner_handler(dev);
rhandler->close(dev);
}
/*
 * Per-device command-processing thread.  Drains the command ring,
 * dispatches each command to the runner handler, completes synchronous
 * results, then blocks on the uio fd waiting for more work.  Normal
 * termination is via cancellation; the pushed cleanup handler closes
 * the handler device in that case.
 */
static void *tcmur_cmdproc_thread(void *arg)
{
	struct tcmu_device *dev = arg;
	struct tcmur_handler *rhandler = tcmu_get_runner_handler(dev);
	struct pollfd pfd;
	int ret;

	pthread_cleanup_push(cmdproc_thread_cleanup, dev);

	while (1) {
		int completed = 0;
		struct tcmulib_cmd *cmd;

		tcmulib_processing_start(dev);

		/* Drain every queued command before sleeping again. */
		while ((cmd = tcmulib_get_next_command(dev)) != NULL) {
			if (tcmu_get_log_level() == TCMU_LOG_DEBUG_SCSI_CMD)
				tcmu_cdb_debug_info(cmd);

			if (tcmur_handler_is_passthrough_only(rhandler))
				ret = tcmur_cmd_passthrough_handler(dev, cmd);
			else
				ret = tcmur_generic_handle_cmd(dev, cmd);

			if (ret == TCMU_NOT_HANDLED)
				tcmu_warn("Command 0x%x not supported\n", cmd->cdb[0]);

			/*
			 * command (processing) completion is called in the following
			 * scenarios:
			 *   - handle_cmd: synchronous handlers
			 *   - generic_handle_cmd: non tcmur handler calls (see generic_cmd())
			 *     and on errors when calling tcmur handler.
			 * TCMU_ASYNC_HANDLED commands are completed later by the
			 * async machinery, not here.
			 */
			if (ret != TCMU_ASYNC_HANDLED) {
				completed = 1;
				tcmur_command_complete(dev, cmd, ret);
			}
		}

		/* Only signal the kernel if something actually completed. */
		if (completed)
			tcmulib_processing_complete(dev);

		/* Block (no timeout) until the kernel kicks the uio fd. */
		pfd.fd = tcmu_get_dev_fd(dev);
		pfd.events = POLLIN;
		pfd.revents = 0;

		poll(&pfd, 1, -1);

		if (pfd.revents != POLLIN) {
			tcmu_err("poll received unexpected revent: 0x%x\n", pfd.revents);
			break;
		}
	}

	/* Only reached on a poll error; normal exit is via cancellation. */
	tcmu_err("thread terminating, should never happen\n");

	pthread_cleanup_pop(1);

	return NULL;
}
/*
 * Device-added callout invoked by libtcmu for each new TCMU device.
 * Allocates the per-device runner state, validates the configured
 * geometry, initializes locking, the I/O work queue and AIO tracking,
 * opens the handler, and starts the command-processing thread.
 *
 * Returns 0 on success or a negative errno-style value; on failure all
 * partially initialized state is torn down in reverse order via the
 * goto chain.
 */
static int dev_added(struct tcmu_device *dev)
{
	struct tcmur_handler *rhandler = tcmu_get_runner_handler(dev);
	struct tcmur_device *rdev;
	int32_t block_size, max_sectors;
	int64_t dev_size;
	int ret;

	rdev = calloc(1, sizeof(*rdev));
	if (!rdev)
		return -ENOMEM;

	tcmu_set_daemon_dev_private(dev, rdev);

	ret = -EINVAL;
	block_size = tcmu_get_attribute(dev, "hw_block_size");
	if (block_size <= 0) {
		tcmu_dev_err(dev, "Could not get hw_block_size\n");
		goto free_rdev;
	}
	tcmu_set_dev_block_size(dev, block_size);

	dev_size = tcmu_get_device_size(dev);
	if (dev_size < 0) {
		tcmu_dev_err(dev, "Could not get device size\n");
		goto free_rdev;
	}
	tcmu_set_dev_num_lbas(dev, dev_size / block_size);

	max_sectors = tcmu_get_attribute(dev, "hw_max_sectors");
	if (max_sectors < 0) {
		/* Previously failed silently; log like the other attrs. */
		tcmu_dev_err(dev, "Could not get hw_max_sectors\n");
		goto free_rdev;
	}
	tcmu_set_dev_max_xfer_len(dev, max_sectors);

	/*
	 * BUGFIX: %ld/%lld with int32_t/int64_t arguments is undefined
	 * behavior where the widths differ (e.g. ILP32); cast to the
	 * types the format specifiers actually expect.
	 */
	tcmu_dev_dbg(dev, "Got block_size %ld, size in bytes %lld\n",
		     (long)block_size, (long long)dev_size);

	ret = pthread_spin_init(&rdev->lock, 0);
	if (ret != 0)
		goto free_rdev;

	ret = pthread_mutex_init(&rdev->caw_lock, NULL);
	if (ret != 0)
		goto cleanup_dev_lock;

	ret = pthread_mutex_init(&rdev->format_lock, NULL);
	if (ret != 0)
		goto cleanup_caw_lock;

	ret = setup_io_work_queue(dev);
	if (ret < 0)
		goto cleanup_format_lock;

	ret = setup_aio_tracking(rdev);
	if (ret < 0)
		goto cleanup_io_work_queue;

	ret = rhandler->open(dev);
	if (ret)
		goto cleanup_aio_tracking;

	ret = tcmulib_start_cmdproc_thread(dev, tcmur_cmdproc_thread);
	if (ret < 0)
		goto close_dev;

	return 0;

close_dev:
	rhandler->close(dev);
cleanup_aio_tracking:
	cleanup_aio_tracking(rdev);
cleanup_io_work_queue:
	cleanup_io_work_queue(dev, true);
cleanup_format_lock:
	pthread_mutex_destroy(&rdev->format_lock);
cleanup_caw_lock:
	pthread_mutex_destroy(&rdev->caw_lock);
cleanup_dev_lock:
	pthread_spin_destroy(&rdev->lock);
free_rdev:
	free(rdev);
	return ret;
}
/*
 * Device-removed callout: undo everything dev_added() set up, in the
 * reverse order it was created.
 */
static void dev_removed(struct tcmu_device *dev)
{
	struct tcmur_device *rdev = tcmu_get_daemon_dev_private(dev);
	int rc;

	/*
	 * Teardown ordering matters: for synchronous handlers the worker
	 * and cmdproc threads must be gone before the handler's ->close()
	 * callout runs, so that no handler callouts can race with handler
	 * shutdown.
	 */
	cleanup_io_work_queue_threads(dev);
	tcmulib_cleanup_cmdproc_thread(dev);
	cleanup_io_work_queue(dev, false);
	cleanup_aio_tracking(rdev);

	rc = pthread_mutex_destroy(&rdev->format_lock);
	if (rc != 0)
		tcmu_err("could not cleanup format lock %d\n", rc);

	rc = pthread_mutex_destroy(&rdev->caw_lock);
	if (rc != 0)
		tcmu_err("could not cleanup caw lock %d\n", rc);

	rc = pthread_spin_destroy(&rdev->lock);
	if (rc != 0)
		tcmu_err("could not cleanup mailbox lock %d\n", rc);

	free(rdev);
}
/*
 * Ensure the log directory exists: accept an already-existing
 * directory, create it (mode 0755) when missing, fail on anything
 * else.  Returns TRUE on success, FALSE on error (logged).
 */
static bool tcmu_logdir_create(const char *path)
{
	DIR *d = opendir(path);

	if (d) {
		closedir(d);
		return TRUE;
	}
	if (errno != ENOENT) {
		tcmu_err("opendir(%s) failed: %m\n", path);
		return FALSE;
	}
	if (mkdir(path, 0755) == -1) {
		tcmu_err("mkdir(%s) failed: %m\n", path);
		return FALSE;
	}
	return TRUE;
}
/* Print the command-line help text to stdout (output is byte-identical
 * to the previous multi-printf version). */
static void usage(void) {
	printf("\nusage:\n"
	       "\ttcmu-runner [options]\n"
	       "\noptions:\n"
	       "\t-h, --help: print this message and exit\n"
	       "\t-V, --version: print version and exit\n"
	       "\t-d, --debug: enable debug messages\n"
	       "\t--handler-path: set path to search for handler modules\n");
	printf("\t\tdefault is %s\n", DEFAULT_HANDLER_PATH);
	printf("\t-l, --tcmu-log-dir: tcmu log dir\n");
	printf("\t\tdefault is %s\n", TCMU_LOG_DIR_DEFAULT);
	printf("\n");
}
/* Command-line options accepted by tcmu-runner; see usage() for help
 * text.  NOTE: "handler-path" is long-only (val 0) and is matched in
 * main() by option_index == 1, i.e. its position in this array. */
static struct option long_options[] = {
	{"debug", no_argument, 0, 'd'},
	{"handler-path", required_argument, 0, 0},	/* option_index 1 */
	{"tcmu-log-dir", required_argument, 0, 'l'},	/* takes a directory */
	{"help", no_argument, 0, 'h'},
	{"version", no_argument, 0, 'V'},
	{0, 0, 0, 0},	/* terminator required by getopt_long() */
};
/*
 * tcmu-runner entry point: parse options, load the target_core_user
 * kernel module, open handler modules, register them with libtcmu,
 * publish the D-Bus service, and run the GLib main loop until SIGINT
 * or SIGTERM.  Returns 0 on clean shutdown; exits with status 1 on any
 * setup failure (and after -h/-V).
 */
int main(int argc, char **argv)
{
	darray(struct tcmulib_handler) handlers = darray_new();
	struct tcmulib_context *tcmulib_context;
	struct tcmur_handler **tmp_r_handler;
	GMainLoop *loop;
	GIOChannel *libtcmu_gio;
	guint reg_id;
	int ret;

	tcmu_cfg = tcmu_config_new();
	if (!tcmu_cfg)
		exit(1);

	ret = tcmu_load_config(tcmu_cfg, NULL);
	if (ret == -1)
		goto err_out;

	while (1) {
		int option_index = 0;
		int c;

		/*
		 * BUGFIX: 'l' takes an argument (see long_options), so the
		 * short-option string must contain "l:".  Without the colon
		 * getopt leaves optarg NULL for "-l <dir>" and the
		 * strlen()/strdup() below dereference NULL.
		 */
		c = getopt_long(argc, argv, "dhl:V",
				long_options, &option_index);
		if (c == -1)
			break;

		switch (c) {
		case 0:
			/* Long-only option: --handler-path. */
			if (option_index == 1)
				handler_path = strdup(optarg);
			break;
		case 'l':
			if (strlen(optarg) > PATH_MAX - TCMU_LOG_FILENAME_MAX) {
				tcmu_err("--tcmu-log-dir='%s' cannot exceed %d characters\n",
					 optarg,
					 PATH_MAX - TCMU_LOG_FILENAME_MAX);
				/* An over-long dir would overflow later log
				 * filename construction: treat as fatal
				 * instead of warning and continuing. */
				goto err_out;
			}
			if (!tcmu_logdir_create(optarg)) {
				goto err_out;
			}
			tcmu_log_dir = strdup(optarg);
			break;
		case 'd':
			tcmu_set_log_level(TCMU_CONF_LOG_DEBUG_SCSI_CMD);
			break;
		case 'V':
			printf("tcmu-runner %s\n", TCMUR_VERSION);
			goto err_out;
		default:
		case 'h':
			usage();
			goto err_out;
		}
	}

	tcmu_dbg("handler path: %s\n", handler_path);

	ret = load_our_module();
	if (ret < 0) {
		tcmu_err("couldn't load module\n");
		goto err_out;
	}

	ret = open_handlers();
	if (ret < 0) {
		tcmu_err("couldn't open handlers\n");
		goto err_out;
	}
	tcmu_dbg("%d runner handlers found\n", ret);

	/*
	 * Convert from tcmu-runner's handler struct to libtcmu's
	 * handler struct, an array of which we pass in, below.
	 */
	darray_foreach(tmp_r_handler, g_runner_handlers) {
		struct tcmulib_handler tmp_handler;

		tmp_handler.name = (*tmp_r_handler)->name;
		tmp_handler.subtype = (*tmp_r_handler)->subtype;
		tmp_handler.cfg_desc = (*tmp_r_handler)->cfg_desc;
		tmp_handler.check_config = (*tmp_r_handler)->check_config;
		tmp_handler.reconfig = (*tmp_r_handler)->reconfig;
		tmp_handler.added = dev_added;
		tmp_handler.removed = dev_removed;

		/*
		 * Can hand out a ref to an internal pointer to the
		 * darray b/c handlers will never be added or removed
		 * once open_handlers() is done.
		 */
		tmp_handler.hm_private = *tmp_r_handler;

		darray_append(handlers, tmp_handler);
	}

	tcmulib_context = tcmulib_initialize(handlers.item, handlers.size);
	if (!tcmulib_context) {
		tcmu_err("tcmulib_initialize failed\n");
		goto err_out;
	}

	loop = g_main_loop_new(NULL, FALSE);
	if (g_unix_signal_add(SIGINT, sighandler, loop) <= 0 ||
	    g_unix_signal_add(SIGTERM, sighandler, loop) <= 0) {
		tcmu_err("couldn't setup signal handlers\n");
		goto err_tcmulib_close;
	}

	/* Set up event for libtcmu */
	libtcmu_gio = g_io_channel_unix_new(tcmulib_get_master_fd(tcmulib_context));
	g_io_add_watch(libtcmu_gio, G_IO_IN, tcmulib_callback, tcmulib_context);

	/* Set up DBus name; all interface export happens in dbus_bus_acquired() */
	reg_id = g_bus_own_name(G_BUS_TYPE_SYSTEM,
				"org.kernel.TCMUService1",
				G_BUS_NAME_OWNER_FLAGS_NONE,
				dbus_bus_acquired,
				dbus_name_acquired, // name acquired
				dbus_name_lost, // name lost
				NULL, // user data
				NULL // user date free func
				);

	g_main_loop_run(loop);

	tcmu_dbg("Exiting...\n");
	g_bus_unown_name(reg_id);
	g_main_loop_unref(loop);
	tcmulib_close(tcmulib_context);
	tcmu_config_destroy(tcmu_cfg);

	return 0;

err_tcmulib_close:
	tcmulib_close(tcmulib_context);
err_out:
	tcmu_config_destroy(tcmu_cfg);
	exit(1);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_2512_0 |
crossvul-cpp_data_good_5370_1 | /*
* Copyright (c) 1999-2000 Image Power, Inc. and the University of
* British Columbia.
* Copyright (c) 2001-2003 Michael David Adams.
* All rights reserved.
*/
/* __START_OF_JASPER_LICENSE__
*
* JasPer License Version 2.0
*
* Copyright (c) 2001-2006 Michael David Adams
* Copyright (c) 1999-2000 Image Power, Inc.
* Copyright (c) 1999-2000 The University of British Columbia
*
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person (the
* "User") obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the
* following conditions:
*
* 1. The above copyright notices and this permission notice (which
* includes the disclaimer below) shall be included in all copies or
* substantial portions of the Software.
*
* 2. The name of a copyright holder shall not be used to endorse or
* promote products derived from the Software without specific prior
* written permission.
*
* THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS
* LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER
* THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
* "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
* INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
* FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
* WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE
* PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE
* THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY.
* EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS
* BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL
* PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS
* GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE
* ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE
* IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL
* SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES,
* AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL
* SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH
* THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH,
* PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH
* RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY
* EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES.
*
* __END_OF_JASPER_LICENSE__
*/
/*
* Windows Bitmap File Library
*
* $Id$
*/
/******************************************************************************\
* Includes.
\******************************************************************************/
#include <assert.h>
#include "jasper/jas_types.h"
#include "jasper/jas_stream.h"
#include "jasper/jas_image.h"
#include "jasper/jas_malloc.h"
#include "jasper/jas_debug.h"
#include "bmp_cod.h"
/******************************************************************************\
* Local prototypes.
\******************************************************************************/
static int bmp_gethdr(jas_stream_t *in, bmp_hdr_t *hdr);
static bmp_info_t *bmp_getinfo(jas_stream_t *in);
static int bmp_getdata(jas_stream_t *in, bmp_info_t *info, jas_image_t *image);
static int bmp_getint16(jas_stream_t *in, int_fast16_t *val);
static int bmp_getint32(jas_stream_t *in, int_fast32_t *val);
static int bmp_gobble(jas_stream_t *in, long n);
/******************************************************************************\
* Interface functions.
\******************************************************************************/
/*
 * Decode a BMP image from the given stream.
 *
 * Returns a newly allocated jas_image_t on success, or 0 on any error
 * (bad header, unsupported encoding, bad offsets, or read failure).
 * optstr is accepted for interface symmetry but ignored.  All error
 * paths converge on the single cleanup label so that neither info nor
 * image is ever leaked.
 */
jas_image_t *bmp_decode(jas_stream_t *in, char *optstr)
{
	jas_image_t *image;
	bmp_hdr_t hdr;
	bmp_info_t *info;
	uint_fast16_t cmptno;
	jas_image_cmptparm_t cmptparms[3];
	jas_image_cmptparm_t *cmptparm;
	uint_fast16_t numcmpts;
	long n;

	/* Both must start null so the error path can free unconditionally. */
	image = 0;
	info = 0;

	if (optstr) {
		jas_eprintf("warning: ignoring BMP decoder options\n");
	}

	jas_eprintf(
	  "THE BMP FORMAT IS NOT FULLY SUPPORTED!\n"
	  "THAT IS, THE JASPER SOFTWARE CANNOT DECODE ALL TYPES OF BMP DATA.\n"
	  "IF YOU HAVE ANY PROBLEMS, PLEASE TRY CONVERTING YOUR IMAGE DATA\n"
	  "TO THE PNM FORMAT, AND USING THIS FORMAT INSTEAD.\n"
	  );

	/* Read the bitmap header. */
	if (bmp_gethdr(in, &hdr)) {
		jas_eprintf("cannot get header\n");
		goto error;
	}
	JAS_DBGLOG(1, (
	  "BMP header: magic 0x%x; siz %d; res1 %d; res2 %d; off %d\n",
	  hdr.magic, hdr.siz, hdr.reserved1, hdr.reserved2, hdr.off
	  ));

	/* Read the bitmap information. */
	if (!(info = bmp_getinfo(in))) {
		jas_eprintf("cannot get info\n");
		goto error;
	}
	JAS_DBGLOG(1,
	  ("BMP information: len %ld; width %ld; height %ld; numplanes %d; "
	  "depth %d; enctype %ld; siz %ld; hres %ld; vres %ld; numcolors %ld; "
	  "mincolors %ld\n", JAS_CAST(long, info->len),
	  JAS_CAST(long, info->width), JAS_CAST(long, info->height),
	  JAS_CAST(long, info->numplanes), JAS_CAST(long, info->depth),
	  JAS_CAST(long, info->enctype), JAS_CAST(long, info->siz),
	  JAS_CAST(long, info->hres), JAS_CAST(long, info->vres),
	  JAS_CAST(long, info->numcolors), JAS_CAST(long, info->mincolors)));

	/* Reject negative header fields so later arithmetic (sizes,
	   offsets) cannot go out of range on hostile input. */
	if (info->width < 0 || info->height < 0 || info->numplanes < 0 ||
	  info->depth < 0 || info->siz < 0 || info->hres < 0 || info->vres < 0) {
		jas_eprintf("corrupt bit stream\n");
		goto error;
	}

	/* Ensure that we support this type of BMP file. */
	if (!bmp_issupported(&hdr, info)) {
		jas_eprintf("error: unsupported BMP encoding\n");
		goto error;
	}

	/* Skip over any useless data between the end of the palette
	   and start of the bitmap data.  A negative gap means the stated
	   data offset lies inside the header/palette and is rejected. */
	if ((n = hdr.off - (BMP_HDRLEN + BMP_INFOLEN + BMP_PALLEN(info))) < 0) {
		jas_eprintf("error: possibly bad bitmap offset?\n");
		goto error;
	}
	if (n > 0) {
		jas_eprintf("skipping unknown data in BMP file\n");
		if (bmp_gobble(in, n)) {
			goto error;
		}
	}

	/* Get the number of components and describe each one: full
	   resolution, 8-bit unsigned samples. */
	numcmpts = bmp_numcmpts(info);

	for (cmptno = 0, cmptparm = cmptparms; cmptno < numcmpts; ++cmptno,
	  ++cmptparm) {
		cmptparm->tlx = 0;
		cmptparm->tly = 0;
		cmptparm->hstep = 1;
		cmptparm->vstep = 1;
		cmptparm->width = info->width;
		cmptparm->height = info->height;
		cmptparm->prec = 8;
		cmptparm->sgnd = false;
	}

	/* Create image object. */
	if (!(image = jas_image_create(numcmpts, cmptparms,
	  JAS_CLRSPC_UNKNOWN))) {
		goto error;
	}

	/* Three components are tagged sRGB; one component is grayscale. */
	if (numcmpts == 3) {
		jas_image_setclrspc(image, JAS_CLRSPC_SRGB);
		jas_image_setcmpttype(image, 0,
		  JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R));
		jas_image_setcmpttype(image, 1,
		  JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G));
		jas_image_setcmpttype(image, 2,
		  JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B));
	} else {
		jas_image_setclrspc(image, JAS_CLRSPC_SGRAY);
		jas_image_setcmpttype(image, 0,
		  JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y));
	}

	/* Read the bitmap data. */
	if (bmp_getdata(in, info, image)) {
		goto error;
	}

	bmp_info_destroy(info);

	return image;

error:
	if (info) {
		bmp_info_destroy(info);
	}
	if (image) {
		jas_image_destroy(image);
	}
	return 0;
}
/*
 * Check whether the stream begins with the BMP signature ("BM").
 * The peeked bytes are pushed back so decoding can start from the top.
 * Returns 0 if the signature matches, -1 otherwise.
 */
int bmp_validate(jas_stream_t *in)
{
	int nread;
	int i;
	uchar sig[2];

	assert(JAS_STREAM_MAXPUTBACK >= 2);

	/* Read the two signature bytes. */
	nread = jas_stream_read(in, (char *) sig, 2);
	if (nread < 0) {
		return -1;
	}

	/* Push back whatever was read, last byte first. */
	for (i = nread - 1; i >= 0; --i) {
		if (jas_stream_ungetc(in, sig[i]) == EOF) {
			return -1;
		}
	}

	/* A short read cannot match. */
	if (nread < 2) {
		return -1;
	}

	return (sig[0] == (BMP_MAGIC & 0xff) && sig[1] == (BMP_MAGIC >> 8)) ?
	  0 : -1;
}
/******************************************************************************\
* Code for aggregate types.
\******************************************************************************/
/*
 * Read the 14-byte BMP file header and verify its magic number.
 * Fields are read in on-disk order; returns -1 on any read failure or
 * a bad magic, 0 on success.
 */
static int bmp_gethdr(jas_stream_t *in, bmp_hdr_t *hdr)
{
	if (bmp_getint16(in, &hdr->magic)) {
		return -1;
	}
	if (hdr->magic != BMP_MAGIC) {
		return -1;
	}
	if (bmp_getint32(in, &hdr->siz)) {
		return -1;
	}
	if (bmp_getint16(in, &hdr->reserved1)) {
		return -1;
	}
	if (bmp_getint16(in, &hdr->reserved2)) {
		return -1;
	}
	if (bmp_getint32(in, &hdr->off)) {
		return -1;
	}
	return 0;
}
/*
 * Read and validate the 40-byte BITMAPINFOHEADER plus any palette that
 * follows it.  Returns a newly allocated bmp_info_t on success, or 0
 * on error (malformed header, unsupported variant/encoding, or read
 * failure).  The destroyed-on-error pattern means the caller never has
 * to clean up a partial info object.
 */
static bmp_info_t *bmp_getinfo(jas_stream_t *in)
{
	bmp_info_t *info;
	int i;
	bmp_palent_t *palent;

	if (!(info = bmp_info_create())) {
		return 0;
	}

	/* Only the 40-byte (BITMAPINFOHEADER) variant is accepted. */
	if (bmp_getint32(in, &info->len) || info->len != 40 ||
	  bmp_getint32(in, &info->width) || bmp_getint32(in, &info->height) ||
	  bmp_getint16(in, &info->numplanes) ||
	  bmp_getint16(in, &info->depth) || bmp_getint32(in, &info->enctype) ||
	  bmp_getint32(in, &info->siz) ||
	  bmp_getint32(in, &info->hres) || bmp_getint32(in, &info->vres) ||
	  bmp_getint32(in, &info->numcolors) ||
	  bmp_getint32(in, &info->mincolors)) {
		bmp_info_destroy(info);
		return 0;
	}

	/* A negative height encodes a top-down bitmap. */
	if (info->height < 0) {
		info->topdown = 1;
		info->height = -info->height;
	} else {
		info->topdown = 0;
	}

	/* Reject non-positive dimensions and negative color counts so the
	   palette allocation and decode loops cannot be driven out of
	   range by hostile input. */
	if (info->width <= 0 || info->height <= 0 || info->numplanes <= 0 ||
	  info->depth <= 0 || info->numcolors < 0 || info->mincolors < 0) {
		bmp_info_destroy(info);
		return 0;
	}

	/* Only uncompressed RGB data is supported. */
	if (info->enctype != BMP_ENC_RGB) {
		jas_eprintf("unsupported BMP encoding\n");
		bmp_info_destroy(info);
		return 0;
	}

	/* Allocate the palette (jas_alloc2 presumably guards the
	   numcolors*entry-size multiplication — verify in jas_malloc). */
	if (info->numcolors > 0) {
		if (!(info->palents = jas_alloc2(info->numcolors,
		  sizeof(bmp_palent_t)))) {
			bmp_info_destroy(info);
			return 0;
		}
	} else {
		info->palents = 0;
	}

	/* Palette entries are stored on disk as B, G, R, reserved. */
	for (i = 0; i < info->numcolors; ++i) {
		palent = &info->palents[i];
		if ((palent->blu = jas_stream_getc(in)) == EOF ||
		  (palent->grn = jas_stream_getc(in)) == EOF ||
		  (palent->red = jas_stream_getc(in)) == EOF ||
		  (palent->res = jas_stream_getc(in)) == EOF) {
			bmp_info_destroy(info);
			return 0;
		}
	}
	return info;
}
/*
 * Read the raw bitmap pixel data from the stream and write it into the
 * image components, one row at a time.  Only uncompressed 8-bit
 * palettized and 24-bit truecolor data reaches this point (enforced by
 * the asserts below and by bmp_issupported() in the caller).
 * Returns 0 on success, -1 on read error, out-of-range palette index,
 * allocation failure, or component-write failure.
 */
static int bmp_getdata(jas_stream_t *in, bmp_info_t *info, jas_image_t *image)
{
	int i;
	int j;
	int y;
	jas_matrix_t *cmpts[3];
	int numpad;
	int red;
	int grn;
	int blu;
	int ret;
	int numcmpts;
	int cmptno;
	int ind;
	bmp_palent_t *palent;
	int mxind;
	int haspal;

	assert(info->depth == 8 || info->depth == 24);
	assert(info->enctype == BMP_ENC_RGB);

	numcmpts = bmp_numcmpts(info);
	haspal = bmp_haspal(info);

	/* Null the matrix slots first so the cleanup loop is always safe. */
	ret = 0;
	for (i = 0; i < numcmpts; ++i) {
		cmpts[i] = 0;
	}

	/* Create temporary matrices to hold one row of component data. */
	for (i = 0; i < numcmpts; ++i) {
		if (!(cmpts[i] = jas_matrix_create(1, info->width))) {
			ret = -1;
			goto bmp_getdata_done;
		}
	}

	/* Each BMP row is padded on disk to a multiple of four bytes. */
	numpad = (numcmpts * info->width) % 4;
	if (numpad) {
		numpad = 4 - numpad;
	}

	/* Largest sample/index value representable at this bit depth. */
	mxind = (1 << info->depth) - 1;

	for (i = 0; i < info->height; ++i) {
		for (j = 0; j < info->width; ++j) {
			if (haspal) {
				if ((ind = jas_stream_getc(in)) == EOF) {
					ret = -1;
					goto bmp_getdata_done;
				}
				if (ind > mxind) {
					ret = -1;
					goto bmp_getdata_done;
				}
				/* Indices past the end of the palette are
				   treated as gray values rather than being
				   read out of bounds. */
				if (ind < info->numcolors) {
					palent = &info->palents[ind];
					red = palent->red;
					grn = palent->grn;
					blu = palent->blu;
				} else {
					red = ind;
					grn = ind;
					blu = ind;
				}
			} else {
				/* 24-bit pixels are stored as B, G, R. */
				if ((blu = jas_stream_getc(in)) == EOF ||
				  (grn = jas_stream_getc(in)) == EOF ||
				  (red = jas_stream_getc(in)) == EOF) {
					ret = -1;
					goto bmp_getdata_done;
				}
			}
			if (numcmpts == 3) {
				jas_matrix_setv(cmpts[0], j, red);
				jas_matrix_setv(cmpts[1], j, grn);
				jas_matrix_setv(cmpts[2], j, blu);
			} else {
				jas_matrix_setv(cmpts[0], j, red);
			}
		}
		/* Consume the row's padding bytes. */
		for (j = numpad; j > 0; --j) {
			if (jas_stream_getc(in) == EOF) {
				ret = -1;
				goto bmp_getdata_done;
			}
		}
		/* Bottom-up bitmaps store the last image row first, so map
		   stream row i to image row height-1-i unless topdown. */
		for (cmptno = 0; cmptno < numcmpts; ++cmptno) {
			y = info->topdown ? i : (info->height - 1 - i);
			if (jas_image_writecmpt(image, cmptno, 0, y, info->width,
			  1, cmpts[cmptno])) {
				ret = -1;
				goto bmp_getdata_done;
			}
		}
	}

bmp_getdata_done:
	/* Destroy the temporary matrices. */
	for (i = 0; i < numcmpts; ++i) {
		if (cmpts[i]) {
			jas_matrix_destroy(cmpts[i]);
		}
	}

	return ret;
}
/******************************************************************************\
* Code for primitive types.
\******************************************************************************/
/*
 * Read a 16-bit little-endian integer from the stream.  Stores the
 * value through val (if non-null) and returns 0, or returns -1 on EOF.
 */
static int bmp_getint16(jas_stream_t *in, int_fast16_t *val)
{
	int b0;
	int b1;

	b0 = jas_stream_getc(in);
	if (b0 == EOF) {
		return -1;
	}
	b1 = jas_stream_getc(in);
	if (b1 == EOF) {
		return -1;
	}
	if (val) {
		*val = (b1 << 8) | b0;
	}
	return 0;
}
/*
 * Read a 32-bit little-endian integer from the stream.  Bytes are
 * combined directly (b0 | b1<<8 | b2<<16 | b3<<24), which produces the
 * same value as the original shift-and-rotate loop.  Stores through
 * val (if non-null) and returns 0, or returns -1 on EOF.
 */
static int bmp_getint32(jas_stream_t *in, int_fast32_t *val)
{
	uint_fast32_t word;
	int i;
	int c;

	word = 0;
	for (i = 0; i < 4; ++i) {
		if ((c = jas_stream_getc(in)) == EOF) {
			return -1;
		}
		word |= JAS_CAST(uint_fast32_t, c) << (8 * i);
	}
	if (val) {
		*val = word;
	}
	return 0;
}
/*
 * Discard exactly n bytes from the stream (no-op for n <= 0).
 * Returns 0 on success, -1 if EOF is hit first.
 */
static int bmp_gobble(jas_stream_t *in, long n)
{
	long i;

	for (i = 0; i < n; ++i) {
		if (jas_stream_getc(in) == EOF) {
			return -1;
		}
	}
	return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_5370_1 |
crossvul-cpp_data_bad_1275_3 | /*
** 2015-06-06
**
** The author disclaims copyright to this source code. In place of
** a legal notice, here is a blessing:
**
** May you do good and not evil.
** May you find forgiveness for yourself and forgive others.
** May you share freely, never taking more than you give.
**
*************************************************************************
** This module contains C code that generates VDBE code used to process
** the WHERE clause of SQL statements.
**
** This file was split off from where.c on 2015-06-06 in order to reduce the
** size of where.c and make it easier to edit. This file contains the routines
** that actually generate the bulk of the WHERE loop code. The original where.c
** file retains the code that does query planning and analysis.
*/
#include "sqliteInt.h"
#include "whereInt.h"
#ifndef SQLITE_OMIT_EXPLAIN
/*
** Return the name of the i-th column of the pIdx index.
*/
/*
** Return the name of the i-th column of the pIdx index, or a fixed
** placeholder for expression ("<expr>") and rowid columns.
*/
static const char *explainIndexColumnName(Index *pIdx, int i){
  int iCol = pIdx->aiColumn[i];
  if( iCol==XN_EXPR ) return "<expr>";
  if( iCol==XN_ROWID ) return "rowid";
  return pIdx->pTable->aCol[iCol].zName;
}
/*
** This routine is a helper for explainIndexRange() below
**
** pStr holds the text of an expression that we are building up one term
** at a time. This routine adds a new term to the end of the expression.
** Terms are separated by AND so add the "AND" text for second and subsequent
** terms only.
*/
static void explainAppendTerm(
  StrAccum *pStr,                 /* The text expression being built */
  Index *pIdx,                    /* Index to read column names from */
  int nTerm,                      /* Number of terms */
  int iTerm,                      /* Zero-based index of first term. */
  int bAnd,                       /* Non-zero to append " AND " */
  const char *zOp                 /* Name of the operator */
){
  int i;

  assert( nTerm>=1 );
  if( bAnd ) sqlite3_str_append(pStr, " AND ", 5);

  /* Left-hand side: column names, parenthesized and comma-separated
  ** only for row-value (multi-column) terms. */
  if( nTerm>1 ) sqlite3_str_append(pStr, "(", 1);
  for(i=0; i<nTerm; i++){
    if( i ) sqlite3_str_append(pStr, ",", 1);
    sqlite3_str_appendall(pStr, explainIndexColumnName(pIdx, iTerm+i));
  }
  if( nTerm>1 ) sqlite3_str_append(pStr, ")", 1);

  /* The operator is assumed to be a single character (e.g. "<", ">");
  ** only one byte of zOp is ever appended. */
  sqlite3_str_append(pStr, zOp, 1);

  /* Right-hand side: one "?" parameter per term. */
  if( nTerm>1 ) sqlite3_str_append(pStr, "(", 1);
  for(i=0; i<nTerm; i++){
    if( i ) sqlite3_str_append(pStr, ",", 1);
    sqlite3_str_append(pStr, "?", 1);
  }
  if( nTerm>1 ) sqlite3_str_append(pStr, ")", 1);
}
/*
** Argument pLevel describes a strategy for scanning table pTab. This
** function appends text to pStr that describes the subset of table
** rows scanned by the strategy in the form of an SQL expression.
**
** For example, if the query:
**
** SELECT * FROM t1 WHERE a=1 AND b>2;
**
** is run and there is an index on (a, b), then this function returns a
** string similar to:
**
** "a=? AND b>?"
*/
static void explainIndexRange(StrAccum *pStr, WhereLoop *pLoop){
  Index *pIndex = pLoop->u.btree.pIndex;
  u16 nEq = pLoop->u.btree.nEq;
  u16 nSkip = pLoop->nSkip;
  int i, j;

  /* Nothing to describe when the loop has no equality terms and no
  ** range (top/bottom limit) constraints. */
  if( nEq==0 && (pLoop->wsFlags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))==0 ) return;
  sqlite3_str_append(pStr, " (", 2);

  /* Equality terms.  Columns consumed by the skip-scan optimization
  ** (the first nSkip) are shown as "ANY(col)" instead of "col=?". */
  for(i=0; i<nEq; i++){
    const char *z = explainIndexColumnName(pIndex, i);
    if( i ) sqlite3_str_append(pStr, " AND ", 5);
    sqlite3_str_appendf(pStr, i>=nSkip ? "%s=?" : "ANY(%s)", z);
  }

  /* Range terms.  j remembers the first range column; after the loop
  ** above, i is non-zero iff an " AND " separator is needed, and it is
  ** forced to 1 once the lower bound has been emitted. */
  j = i;
  if( pLoop->wsFlags&WHERE_BTM_LIMIT ){
    explainAppendTerm(pStr, pIndex, pLoop->u.btree.nBtm, j, i, ">");
    i = 1;
  }
  if( pLoop->wsFlags&WHERE_TOP_LIMIT ){
    explainAppendTerm(pStr, pIndex, pLoop->u.btree.nTop, j, i, "<");
  }
  sqlite3_str_append(pStr, ")", 1);
}
/*
** This function is a no-op unless currently processing an EXPLAIN QUERY PLAN
** command, or if either SQLITE_DEBUG or SQLITE_ENABLE_STMT_SCANSTATUS was
** defined at compile-time. If it is not a no-op, a single OP_Explain opcode
** is added to the output to describe the table scan strategy in pLevel.
**
** If an OP_Explain opcode is added to the VM, its address is returned.
** Otherwise, if no OP_Explain is coded, zero is returned.
*/
int sqlite3WhereExplainOneScan(
  Parse *pParse,                  /* Parse context */
  SrcList *pTabList,              /* Table list this loop refers to */
  WhereLevel *pLevel,             /* Scan to write OP_Explain opcode for */
  u16 wctrlFlags                  /* Flags passed to sqlite3WhereBegin() */
){
  int ret = 0;
#if !defined(SQLITE_DEBUG) && !defined(SQLITE_ENABLE_STMT_SCANSTATUS)
  /* Without the debug/scanstatus builds, only emit OP_Explain when an
  ** EXPLAIN QUERY PLAN (explain==2) is actually being run. */
  if( sqlite3ParseToplevel(pParse)->explain==2 )
#endif
  {
    struct SrcList_item *pItem = &pTabList->a[pLevel->iFrom];
    Vdbe *v = pParse->pVdbe;      /* VM being constructed */
    sqlite3 *db = pParse->db;     /* Database handle */
    int isSearch;                 /* True for a SEARCH. False for SCAN. */
    WhereLoop *pLoop;             /* The controlling WhereLoop object */
    u32 flags;                    /* Flags that describe this loop */
    char *zMsg;                   /* Text to add to EQP output */
    StrAccum str;                 /* EQP output string */
    char zBuf[100];               /* Initial space for EQP output string */

    pLoop = pLevel->pWLoop;
    flags = pLoop->wsFlags;
    /* OR-clause sub-scans are explained by their parent scan. */
    if( (flags&WHERE_MULTI_OR) || (wctrlFlags&WHERE_OR_SUBCLAUSE) ) return 0;

    /* "SEARCH" means the scan is constrained (range limits, equality
    ** terms, or a min/max optimization); otherwise it is a full "SCAN". */
    isSearch = (flags&(WHERE_BTM_LIMIT|WHERE_TOP_LIMIT))!=0
            || ((flags&WHERE_VIRTUALTABLE)==0 && (pLoop->u.btree.nEq>0))
            || (wctrlFlags&(WHERE_ORDERBY_MIN|WHERE_ORDERBY_MAX));

    sqlite3StrAccumInit(&str, db, zBuf, sizeof(zBuf), SQLITE_MAX_LENGTH);
    sqlite3_str_appendall(&str, isSearch ? "SEARCH" : "SCAN");
    if( pItem->pSelect ){
      sqlite3_str_appendf(&str, " SUBQUERY %u", pItem->pSelect->selId);
    }else{
      sqlite3_str_appendf(&str, " TABLE %s", pItem->zName);
    }

    if( pItem->zAlias ){
      sqlite3_str_appendf(&str, " AS %s", pItem->zAlias);
    }
    if( (flags & (WHERE_IPK|WHERE_VIRTUALTABLE))==0 ){
      /* B-tree index scan: describe which index and (for searches) the
      ** constrained column range. */
      const char *zFmt = 0;
      Index *pIdx;

      assert( pLoop->u.btree.pIndex!=0 );
      pIdx = pLoop->u.btree.pIndex;
      assert( !(flags&WHERE_AUTO_INDEX) || (flags&WHERE_IDX_ONLY) );
      if( !HasRowid(pItem->pTab) && IsPrimaryKeyIndex(pIdx) ){
        if( isSearch ){
          zFmt = "PRIMARY KEY";
        }
      }else if( flags & WHERE_PARTIALIDX ){
        zFmt = "AUTOMATIC PARTIAL COVERING INDEX";
      }else if( flags & WHERE_AUTO_INDEX ){
        zFmt = "AUTOMATIC COVERING INDEX";
      }else if( flags & WHERE_IDX_ONLY ){
        zFmt = "COVERING INDEX %s";
      }else{
        zFmt = "INDEX %s";
      }
      if( zFmt ){
        sqlite3_str_append(&str, " USING ", 7);
        sqlite3_str_appendf(&str, zFmt, pIdx->zName);
        explainIndexRange(&str, pLoop);
      }
    }else if( (flags & WHERE_IPK)!=0 && (flags & WHERE_CONSTRAINT)!=0 ){
      /* Rowid lookup or rowid-range scan. */
      const char *zRangeOp;
      if( flags&(WHERE_COLUMN_EQ|WHERE_COLUMN_IN) ){
        zRangeOp = "=";
      }else if( (flags&WHERE_BOTH_LIMIT)==WHERE_BOTH_LIMIT ){
        zRangeOp = ">? AND rowid<";
      }else if( flags&WHERE_BTM_LIMIT ){
        zRangeOp = ">";
      }else{
        assert( flags&WHERE_TOP_LIMIT);
        zRangeOp = "<";
      }
      sqlite3_str_appendf(&str,
          " USING INTEGER PRIMARY KEY (rowid%s?)",zRangeOp);
    }
#ifndef SQLITE_OMIT_VIRTUALTABLE
    else if( (flags & WHERE_VIRTUALTABLE)!=0 ){
      sqlite3_str_appendf(&str, " VIRTUAL TABLE INDEX %d:%s",
                  pLoop->u.vtab.idxNum, pLoop->u.vtab.idxStr);
    }
#endif
#ifdef SQLITE_EXPLAIN_ESTIMATED_ROWS
    if( pLoop->nOut>=10 ){
      sqlite3_str_appendf(&str, " (~%llu rows)",
             sqlite3LogEstToInt(pLoop->nOut));
    }else{
      sqlite3_str_append(&str, " (~1 row)", 9);
    }
#endif
    zMsg = sqlite3StrAccumFinish(&str);
    sqlite3ExplainBreakpoint("",zMsg);
    ret = sqlite3VdbeAddOp4(v, OP_Explain, sqlite3VdbeCurrentAddr(v),
                            pParse->addrExplain, 0, zMsg,P4_DYNAMIC);
  }
  return ret;
}
#endif /* SQLITE_OMIT_EXPLAIN */
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
/*
** Configure the VM passed as the first argument with an
** sqlite3_stmt_scanstatus() entry corresponding to the scan used to
** implement level pLvl. Argument pSrclist is a pointer to the FROM
** clause that the scan reads data from.
**
** If argument addrExplain is not 0, it must be the address of an
** OP_Explain instruction that describes the same loop.
*/
/*
** Register an sqlite3_stmt_scanstatus() entry for the scan that
** implements level pLvl, naming it after the index used when there is
** one, or after the table from the FROM clause otherwise.
*/
void sqlite3WhereAddScanStatus(
  Vdbe *v,                        /* Vdbe to add scanstatus entry to */
  SrcList *pSrclist,              /* FROM clause pLvl reads data from */
  WhereLevel *pLvl,               /* Level to add scanstatus() entry for */
  int addrExplain                 /* Address of OP_Explain (or 0) */
){
  WhereLoop *pLoop = pLvl->pWLoop;
  const char *zName;

  if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){
    zName = pLoop->u.btree.pIndex->zName;
  }else{
    zName = pSrclist->a[pLvl->iFrom].zName;
  }
  sqlite3VdbeScanStatus(v, addrExplain, pLvl->addrBody, pLvl->addrVisit,
                        pLoop->nOut, zName);
}
#endif
/*
** Disable a term in the WHERE clause. Except, do not disable the term
** if it controls a LEFT OUTER JOIN and it did not originate in the ON
** or USING clause of that join.
**
** Consider the term t2.z='ok' in the following queries:
**
** (1) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x WHERE t2.z='ok'
** (2) SELECT * FROM t1 LEFT JOIN t2 ON t1.a=t2.x AND t2.z='ok'
** (3) SELECT * FROM t1, t2 WHERE t1.a=t2.x AND t2.z='ok'
**
** The t2.z='ok' is disabled in the in (2) because it originates
** in the ON clause. The term is disabled in (3) because it is not part
** of a LEFT OUTER JOIN. In (1), the term is not disabled.
**
** Disabling a term causes that term to not be tested in the inner loop
** of the join. Disabling is an optimization. When terms are satisfied
** by indices, we disable them to prevent redundant tests in the inner
** loop. We would get the correct results if nothing were ever disabled,
** but joins might run a little slower. The trick is to disable as much
** as we can without disabling too much. If we disabled in (1), we'd get
** the wrong answer. See ticket #813.
**
** If all the children of a term are disabled, then that term is also
** automatically disabled. In this way, terms get disabled if derived
** virtual terms are tested first. For example:
**
** x GLOB 'abc*' AND x>='abc' AND x<'acd'
** \___________/ \______/ \_____/
** parent child1 child2
**
** Only the parent term was in the original WHERE clause. The child1
** and child2 terms were added by the LIKE optimization. If both of
** the virtual child terms are valid, then testing of the parent can be
** skipped.
**
** Usually the parent term is marked as TERM_CODED. But if the parent
** term was originally TERM_LIKE, then the parent gets TERM_LIKECOND instead.
** The TERM_LIKECOND marking indicates that the term should be coded inside
** a conditional such that is only evaluated on the second pass of a
** LIKE-optimization loop, when scanning BLOBs instead of strings.
*/
static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){
  int iPass = 0;   /* 0 on the first (original) term, >0 for parent terms */
  assert( pTerm!=0 );
  for(;;){
    /* Stop if the term is already coded, if it belongs to the WHERE clause
    ** of a LEFT JOIN without originating in the ON/USING clause, or if it
    ** depends on tables that are not yet available. */
    if( (pTerm->wtFlags & TERM_CODED)!=0 ) break;
    if( pLevel->iLeftJoin!=0 && !ExprHasProperty(pTerm->pExpr, EP_FromJoin) ){
      break;
    }
    if( (pLevel->notReady & pTerm->prereqAll)!=0 ) break;

    /* A LIKE-optimization parent term gets TERM_LIKECOND instead of
    ** TERM_CODED so it is still evaluated on the BLOB pass. */
    if( iPass && (pTerm->wtFlags & TERM_LIKE)!=0 ){
      pTerm->wtFlags |= TERM_LIKECOND;
    }else{
      pTerm->wtFlags |= TERM_CODED;
    }
    if( pTerm->iParent<0 ) break;

    /* Propagate upward: a parent is disabled once all children are. */
    pTerm = &pTerm->pWC->a[pTerm->iParent];
    assert( pTerm!=0 );
    pTerm->nChild--;
    if( pTerm->nChild!=0 ) break;
    iPass++;
  }
}
/*
** Code an OP_Affinity opcode to apply the column affinity string zAff
** to the n registers starting at base.
**
** As an optimization, SQLITE_AFF_BLOB and SQLITE_AFF_NONE entries (which
** are no-ops) at the beginning and end of zAff are ignored. If all entries
** in zAff are SQLITE_AFF_BLOB or SQLITE_AFF_NONE, then no code gets generated.
**
** This routine makes its own copy of zAff so that the caller is free
** to modify zAff after this routine returns.
*/
static void codeApplyAffinity(Parse *pParse, int base, int n, char *zAff){
  Vdbe *v = pParse->pVdbe;
  if( zAff==0 ){
    assert( pParse->db->mallocFailed );
    return;
  }
  assert( v!=0 );

  /* SQLITE_AFF_BLOB and SQLITE_AFF_NONE conversions are no-ops, so strip
  ** them from both ends of the affinity string before emitting code. */
  assert( SQLITE_AFF_NONE<SQLITE_AFF_BLOB );
  for(; n>0 && zAff[0]<=SQLITE_AFF_BLOB; n--, base++, zAff++){}
  for(; n>1 && zAff[n-1]<=SQLITE_AFF_BLOB; n--){}

  /* Emit OP_Affinity only if some conversion remains to be done */
  if( n>0 ){
    sqlite3VdbeAddOp4(v, OP_Affinity, base, n, 0, zAff, n);
  }
}
/*
** Expression pRight, which is the RHS of a comparison operation, is
** either a vector of n elements or, if n==1, a scalar expression.
** Before the comparison operation, affinity zAff is to be applied
** to the pRight values. This function modifies characters within the
** affinity string to SQLITE_AFF_BLOB if either:
**
** * the comparison will be performed with no affinity, or
** * the affinity change in zAff is guaranteed not to change the value.
*/
static void updateRangeAffinityStr(
  Expr *pRight,         /* RHS of comparison */
  int n,                /* Number of vector elements in comparison */
  char *zAff            /* Affinity string to modify */
){
  int i;
  for(i=0; i<n; i++){
    Expr *pField = sqlite3VectorFieldSubexpr(pRight, i);
    /* Neutralize the affinity when the comparison uses none, or when the
    ** conversion is guaranteed not to change the operand value. */
    if( sqlite3CompareAffinity(pField, zAff[i])==SQLITE_AFF_BLOB ){
      zAff[i] = SQLITE_AFF_BLOB;
    }else if( sqlite3ExprNeedsNoAffinityChange(pField, zAff[i]) ){
      zAff[i] = SQLITE_AFF_BLOB;
    }
  }
}
/*
** pX is an expression of the form: (vector) IN (SELECT ...)
** In other words, it is a vector IN operator with a SELECT clause on the
** LHS. But not all terms in the vector are indexable and the terms might
** not be in the correct order for indexing.
**
** This routine makes a copy of the input pX expression and then adjusts
** the vector on the LHS with corresponding changes to the SELECT so that
** the vector contains only index terms and those terms are in the correct
** order. The modified IN expression is returned. The caller is responsible
** for deleting the returned expression.
**
** Example:
**
** CREATE TABLE t1(a,b,c,d,e,f);
** CREATE INDEX t1x1 ON t1(e,c);
** SELECT * FROM t1 WHERE (a,b,c,d,e) IN (SELECT v,w,x,y,z FROM t2)
** \_______________________________________/
** The pX expression
**
** Since only columns e and c can be used with the index, in that order,
** the modified IN expression that is returned will be:
**
** (e,c) IN (SELECT z,x FROM t2)
**
** The reduced pX is different from the original (obviously) and thus is
** only used for indexing, to improve performance. The original unaltered
** IN expression must also be run on each output row for correctness.
*/
static Expr *removeUnindexableInClauseTerms(
  Parse *pParse,        /* The parsing context */
  int iEq,              /* Look at loop terms starting here */
  WhereLoop *pLoop,     /* The current loop */
  Expr *pX              /* The IN expression to be reduced */
){
  sqlite3 *db = pParse->db;
  Expr *pNew = sqlite3ExprDup(db, pX, 0);  /* Work on a copy; pX unchanged */
  if( db->mallocFailed==0 ){
    ExprList *pOrigRhs = pNew->x.pSelect->pEList;  /* Original unmodified RHS */
    ExprList *pOrigLhs = pNew->pLeft->x.pList;     /* Original unmodified LHS */
    ExprList *pRhs = 0;         /* New RHS after modifications */
    ExprList *pLhs = 0;         /* New LHS after mods */
    int i;                      /* Loop counter */
    Select *pSelect;            /* Pointer to the SELECT on the RHS */

    /* Move the LHS/RHS expression pairs that the loop actually uses (the
    ** aLTerm[] entries referring to pX) into pLhs/pRhs, in index order.
    ** Each transferred slot in the original lists is zeroed so that the
    ** sqlite3ExprListDelete() calls below free only the unused terms. */
    for(i=iEq; i<pLoop->nLTerm; i++){
      if( pLoop->aLTerm[i]->pExpr==pX ){
        int iField = pLoop->aLTerm[i]->iField - 1;
        if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */
        pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr);
        pOrigRhs->a[iField].pExpr = 0;
        assert( pOrigLhs->a[iField].pExpr!=0 );
        pLhs = sqlite3ExprListAppend(pParse, pLhs, pOrigLhs->a[iField].pExpr);
        pOrigLhs->a[iField].pExpr = 0;
      }
    }
    sqlite3ExprListDelete(db, pOrigRhs);
    sqlite3ExprListDelete(db, pOrigLhs);
    pNew->pLeft->x.pList = pLhs;
    pNew->x.pSelect->pEList = pRhs;
    if( pLhs && pLhs->nExpr==1 ){
      /* Take care here not to generate a TK_VECTOR containing only a
      ** single value. Since the parser never creates such a vector, some
      ** of the subroutines do not handle this case. */
      Expr *p = pLhs->a[0].pExpr;
      pLhs->a[0].pExpr = 0;
      sqlite3ExprDelete(db, pNew->pLeft);
      pNew->pLeft = p;
    }
    pSelect = pNew->x.pSelect;
    if( pSelect->pOrderBy ){
      /* If the SELECT statement has an ORDER BY clause, zero the
      ** iOrderByCol variables. These are set to non-zero when an
      ** ORDER BY term exactly matches one of the terms of the
      ** result-set. Since the result-set of the SELECT statement may
      ** have been modified or reordered, these variables are no longer
      ** set correctly. Since setting them is just an optimization,
      ** it's easiest just to zero them here. */
      ExprList *pOrderBy = pSelect->pOrderBy;
      for(i=0; i<pOrderBy->nExpr; i++){
        pOrderBy->a[i].u.x.iOrderByCol = 0;
      }
    }
#if 0
    printf("For indexing, change the IN expr:\n");
    sqlite3TreeViewExpr(0, pX, 0);
    printf("Into:\n");
    sqlite3TreeViewExpr(0, pNew, 0);
#endif
  }
  return pNew;  /* Caller must sqlite3ExprDelete() this copy */
}
/*
** Generate code for a single equality term of the WHERE clause. An equality
** term can be either X=expr or X IN (...). pTerm is the term to be
** coded.
**
** The current value for the constraint is left in a register, the index
** of which is returned. An attempt is made store the result in iTarget but
** this is only guaranteed for TK_ISNULL and TK_IN constraints. If the
** constraint is a TK_EQ or TK_IS, then the current value might be left in
** some other register and it is the caller's responsibility to compensate.
**
** For a constraint of the form X=expr, the expression is evaluated in
** straight-line code. For constraints of the form X IN (...)
** this routine sets up a loop that will iterate over all values of X.
*/
static int codeEqualityTerm(
  Parse *pParse,      /* The parsing context */
  WhereTerm *pTerm,   /* The term of the WHERE clause to be coded */
  WhereLevel *pLevel, /* The level of the FROM clause we are working on */
  int iEq,            /* Index of the equality term within this level */
  int bRev,           /* True for reverse-order IN operations */
  int iTarget         /* Attempt to leave results in this register */
){
  Expr *pX = pTerm->pExpr;
  Vdbe *v = pParse->pVdbe;
  int iReg;                  /* Register holding results */
  assert( pLevel->pWLoop->aLTerm[iEq]==pTerm );
  assert( iTarget>0 );
  if( pX->op==TK_EQ || pX->op==TK_IS ){
    /* X=expr or X IS expr:  evaluate the RHS directly */
    iReg = sqlite3ExprCodeTarget(pParse, pX->pRight, iTarget);
  }else if( pX->op==TK_ISNULL ){
    /* X IS NULL:  the comparison value is a literal NULL */
    iReg = iTarget;
    sqlite3VdbeAddOp2(v, OP_Null, 0, iReg);
#ifndef SQLITE_OMIT_SUBQUERY
  }else{
    /* X IN (...):  set up a loop that iterates over the RHS values */
    int eType = IN_INDEX_NOOP;
    int iTab;
    struct InLoop *pIn;
    WhereLoop *pLoop = pLevel->pWLoop;
    int i;
    int nEq = 0;    /* Number of aLTerm[] entries driven by expression pX */
    int *aiMap = 0; /* Vector-field to index-column map, or NULL */
    if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0
      && pLoop->u.btree.pIndex!=0
      && pLoop->u.btree.pIndex->aSortOrder[iEq]
    ){
      /* A DESC index column reverses the desired IN iteration order */
      testcase( iEq==0 );
      testcase( bRev );
      bRev = !bRev;
    }
    assert( pX->op==TK_IN );
    iReg = iTarget;
    /* If this same IN expression was already coded by an earlier term of
    ** this loop (a vector IN covering multiple columns), do not code it
    ** again - just disable the term. */
    for(i=0; i<iEq; i++){
      if( pLoop->aLTerm[i] && pLoop->aLTerm[i]->pExpr==pX ){
        disableTerm(pLevel, pTerm);
        return iTarget;
      }
    }
    /* Count how many loop terms this one IN operator drives */
    for(i=iEq;i<pLoop->nLTerm; i++){
      assert( pLoop->aLTerm[i]!=0 );
      if( pLoop->aLTerm[i]->pExpr==pX ) nEq++;
    }
    iTab = 0;
    if( (pX->flags & EP_xIsSelect)==0 || pX->x.pSelect->pEList->nExpr==1 ){
      /* Scalar IN, or a subquery returning a single column */
      eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, 0, &iTab);
    }else{
      /* Vector IN with a subquery RHS.  Build a reduced copy of pX that
      ** contains only the indexable fields, in index order. */
      sqlite3 *db = pParse->db;
      pX = removeUnindexableInClauseTerms(pParse, iEq, pLoop, pX);
      if( !db->mallocFailed ){
        aiMap = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*nEq);
        eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap, &iTab);
        pTerm->pExpr->iTable = iTab;
      }
      sqlite3ExprDelete(db, pX);
      pX = pTerm->pExpr;  /* Resume using the original, unreduced expression */
    }
    if( eType==IN_INDEX_INDEX_DESC ){
      testcase( bRev );
      bRev = !bRev;
    }
    /* Position the RHS cursor on its first (or last, if bRev) entry */
    sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iTab, 0);
    VdbeCoverageIf(v, bRev);
    VdbeCoverageIf(v, !bRev);
    assert( (pLoop->wsFlags & WHERE_MULTI_OR)==0 );
    pLoop->wsFlags |= WHERE_IN_ABLE;
    if( pLevel->u.in.nIn==0 ){
      pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse);
    }
    /* Grow the aInLoop[] array to hold the nEq new loop descriptors */
    i = pLevel->u.in.nIn;
    pLevel->u.in.nIn += nEq;
    pLevel->u.in.aInLoop =
       sqlite3DbReallocOrFree(pParse->db, pLevel->u.in.aInLoop,
                              sizeof(pLevel->u.in.aInLoop[0])*pLevel->u.in.nIn);
    pIn = pLevel->u.in.aInLoop;
    if( pIn ){
      int iMap = 0;               /* Index in aiMap[] */
      pIn += i;
      for(i=iEq;i<pLoop->nLTerm; i++){
        if( pLoop->aLTerm[i]->pExpr==pX ){
          int iOut = iReg + i - iEq;  /* Register receiving this field */
          if( eType==IN_INDEX_ROWID ){
            pIn->addrInTop = sqlite3VdbeAddOp2(v, OP_Rowid, iTab, iOut);
          }else{
            int iCol = aiMap ? aiMap[iMap++] : 0;
            pIn->addrInTop = sqlite3VdbeAddOp3(v,OP_Column,iTab, iCol, iOut);
          }
          sqlite3VdbeAddOp1(v, OP_IsNull, iOut); VdbeCoverage(v);
          if( i==iEq ){
            pIn->iCur = iTab;
            pIn->eEndLoopOp = bRev ? OP_Prev : OP_Next;
            if( iEq>0 && (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 ){
              /* Record the run of preceding equality registers; used in
              ** conjunction with the WHERE_IN_EARLYOUT flag */
              pIn->iBase = iReg - i;
              pIn->nPrefix = i;
              pLoop->wsFlags |= WHERE_IN_EARLYOUT;
            }else{
              pIn->nPrefix = 0;
            }
          }else{
            /* Only the first InLoop entry for pX advances the cursor */
            pIn->eEndLoopOp = OP_Noop;
          }
          pIn++;
        }
      }
    }else{
      pLevel->u.in.nIn = 0;  /* OOM: realloc freed the array */
    }
    sqlite3DbFree(pParse->db, aiMap);
#endif
  }
  disableTerm(pLevel, pTerm);
  return iReg;
}
/*
** Generate code that will evaluate all == and IN constraints for an
** index scan.
**
** For example, consider table t1(a,b,c,d,e,f) with index i1(a,b,c).
** Suppose the WHERE clause is this: a==5 AND b IN (1,2,3) AND c>5 AND c<10
** The index has as many as three equality constraints, but in this
** example, the third "c" value is an inequality. So only two
** constraints are coded. This routine will generate code to evaluate
** a==5 and b IN (1,2,3). The current values for a and b will be stored
** in consecutive registers and the index of the first register is returned.
**
** In the example above nEq==2. But this subroutine works for any value
** of nEq including 0. If nEq==0, this routine is nearly a no-op.
** The only thing it does is allocate the pLevel->iMem memory cell and
** compute the affinity string.
**
** The nExtraReg parameter is 0 or 1. It is 0 if all WHERE clause constraints
** are == or IN and are covered by the nEq. nExtraReg is 1 if there is
** an inequality constraint (such as the "c>=5 AND c<10" in the example) that
** occurs after the nEq quality constraints.
**
** This routine allocates a range of nEq+nExtraReg memory cells and returns
** the index of the first memory cell in that range. The code that
** calls this routine will use that memory range to store keys for
** start and termination conditions of the loop.
** key value of the loop. If one or more IN operators appear, then
** this routine allocates an additional nEq memory cells for internal
** use.
**
** Before returning, *pzAff is set to point to a buffer containing a
** copy of the column affinity string of the index allocated using
** sqlite3DbMalloc(). Except, entries in the copy of the string associated
** with equality constraints that use BLOB or NONE affinity are set to
** SQLITE_AFF_BLOB. This is to deal with SQL such as the following:
**
** CREATE TABLE t1(a TEXT PRIMARY KEY, b);
** SELECT ... FROM t1 AS t2, t1 WHERE t1.a = t2.b;
**
** In the example above, the index on t1(a) has TEXT affinity. But since
** the right hand side of the equality constraint (t2.b) has BLOB/NONE affinity,
** no conversion should be attempted before using a t2.b value as part of
** a key to search the index. Hence the first byte in the returned affinity
** string in this example would be set to SQLITE_AFF_BLOB.
*/
static int codeAllEqualityTerms(
  Parse *pParse,        /* Parsing context */
  WhereLevel *pLevel,   /* Which nested loop of the FROM we are coding */
  int bRev,             /* Reverse the order of IN operators */
  int nExtraReg,        /* Number of extra registers to allocate */
  char **pzAff          /* OUT: Set to point to affinity string */
){
  u16 nEq;                      /* The number of == or IN constraints to code */
  u16 nSkip;                    /* Number of left-most columns to skip */
  Vdbe *v = pParse->pVdbe;      /* The vm under construction */
  Index *pIdx;                  /* The index being used for this loop */
  WhereTerm *pTerm;             /* A single constraint term */
  WhereLoop *pLoop;             /* The WhereLoop object */
  int j;                        /* Loop counter */
  int regBase;                  /* Base register */
  int nReg;                     /* Number of registers to allocate */
  char *zAff;                   /* Affinity string to return */

  /* This module is only called on query plans that use an index. */
  pLoop = pLevel->pWLoop;
  assert( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 );
  nEq = pLoop->u.btree.nEq;
  nSkip = pLoop->nSkip;
  pIdx = pLoop->u.btree.pIndex;
  assert( pIdx!=0 );

  /* Figure out how many memory cells we will need then allocate them.
  */
  regBase = pParse->nMem + 1;
  nReg = pLoop->u.btree.nEq + nExtraReg;
  pParse->nMem += nReg;

  /* Caller frees this copy; this routine may overwrite entries with
  ** SQLITE_AFF_BLOB below. */
  zAff = sqlite3DbStrDup(pParse->db,sqlite3IndexAffinityStr(pParse->db,pIdx));
  assert( zAff!=0 || pParse->db->mallocFailed );

  /* For a skip-scan, position the cursor and read the first nSkip index
  ** columns into regBase..regBase+nSkip-1 before coding the == terms. */
  if( nSkip ){
    int iIdxCur = pLevel->iIdxCur;
    sqlite3VdbeAddOp1(v, (bRev?OP_Last:OP_Rewind), iIdxCur);
    VdbeCoverageIf(v, bRev==0);
    VdbeCoverageIf(v, bRev!=0);
    VdbeComment((v, "begin skip-scan on %s", pIdx->zName));
    j = sqlite3VdbeAddOp0(v, OP_Goto);
    pLevel->addrSkip = sqlite3VdbeAddOp4Int(v, (bRev?OP_SeekLT:OP_SeekGT),
                            iIdxCur, 0, regBase, nSkip);
    VdbeCoverageIf(v, bRev==0);
    VdbeCoverageIf(v, bRev!=0);
    sqlite3VdbeJumpHere(v, j);
    for(j=0; j<nSkip; j++){
      sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, j, regBase+j);
      testcase( pIdx->aiColumn[j]==XN_EXPR );
      VdbeComment((v, "%s", explainIndexColumnName(pIdx, j)));
    }
  }

  /* Evaluate the equality constraints
  */
  assert( zAff==0 || (int)strlen(zAff)>=nEq );
  for(j=nSkip; j<nEq; j++){
    int r1;
    pTerm = pLoop->aLTerm[j];
    assert( pTerm!=0 );
    /* The following testcase is true for indices with redundant columns.
    ** Ex: CREATE INDEX i1 ON t1(a,b,a); SELECT * FROM t1 WHERE a=0 AND b=0; */
    testcase( (pTerm->wtFlags & TERM_CODED)!=0 );
    testcase( pTerm->wtFlags & TERM_VIRTUAL );
    r1 = codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, regBase+j);
    if( r1!=regBase+j ){
      /* codeEqualityTerm() left the value somewhere else; copy it into
      ** the consecutive register block (or adopt r1 if it is the only
      ** register needed). */
      if( nReg==1 ){
        sqlite3ReleaseTempReg(pParse, regBase);
        regBase = r1;
      }else{
        sqlite3VdbeAddOp2(v, OP_SCopy, r1, regBase+j);
      }
    }
    if( pTerm->eOperator & WO_IN ){
      if( pTerm->pExpr->flags & EP_xIsSelect ){
        /* No affinity ever needs to be (or should be) applied to a value
        ** from the RHS of an "? IN (SELECT ...)" expression. The
        ** sqlite3FindInIndex() routine has already ensured that the
        ** affinity of the comparison has been applied to the value. */
        if( zAff ) zAff[j] = SQLITE_AFF_BLOB;
      }
    }else if( (pTerm->eOperator & WO_ISNULL)==0 ){
      Expr *pRight = pTerm->pExpr->pRight;
      /* A NULL RHS can never match an == constraint; jump out early
      ** unless the operator is IS (which matches NULL). */
      if( (pTerm->wtFlags & TERM_IS)==0 && sqlite3ExprCanBeNull(pRight) ){
        sqlite3VdbeAddOp2(v, OP_IsNull, regBase+j, pLevel->addrBrk);
        VdbeCoverage(v);
      }
      if( zAff ){
        if( sqlite3CompareAffinity(pRight, zAff[j])==SQLITE_AFF_BLOB ){
          zAff[j] = SQLITE_AFF_BLOB;
        }
        if( sqlite3ExprNeedsNoAffinityChange(pRight, zAff[j]) ){
          zAff[j] = SQLITE_AFF_BLOB;
        }
      }
    }
  }
  *pzAff = zAff;
  return regBase;
}
#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS
/*
** If the most recently coded instruction is a constant range constraint
** (a string literal) that originated from the LIKE optimization, then
** set P3 and P5 on the OP_String opcode so that the string will be cast
** to a BLOB at appropriate times.
**
** The LIKE optimization trys to evaluate "x LIKE 'abc%'" as a range
** expression: "x>='ABC' AND x<'abd'". But this requires that the range
** scan loop run twice, once for strings and a second time for BLOBs.
** The OP_String opcodes on the second pass convert the upper and lower
** bound string constants to blobs. This routine makes the necessary changes
** to the OP_String opcodes for that to happen.
**
** Except, of course, if SQLITE_LIKE_DOESNT_MATCH_BLOBS is defined, then
** only the one pass through the string space is required, so this routine
** becomes a no-op.
*/
static void whereLikeOptimizationStringFixup(
  Vdbe *v,            /* prepared statement under construction */
  WhereLevel *pLevel, /* The loop that contains the LIKE operator */
  WhereTerm *pTerm    /* The upper or lower bound just coded */
){
  VdbeOp *pOp;

  /* Only bounds created by the LIKE optimization need fixing up */
  if( (pTerm->wtFlags & TERM_LIKEOPT)==0 ) return;
  assert( pLevel->iLikeRepCntr>0 );
  pOp = sqlite3VdbeGetOp(v, -1);
  assert( pOp!=0 );
  assert( pOp->opcode==OP_String8
          || pTerm->pWC->pWInfo->pParse->db->mallocFailed );
  pOp->p3 = (int)(pLevel->iLikeRepCntr>>1);  /* Register holding counter */
  pOp->p5 = (u8)(pLevel->iLikeRepCntr&1);    /* ASC or DESC */
}
#else
# define whereLikeOptimizationStringFixup(A,B,C)
#endif
#ifdef SQLITE_ENABLE_CURSOR_HINTS
/*
** Information is passed from codeCursorHint() down to individual nodes of
** the expression tree (by sqlite3WalkExpr()) using an instance of this
** structure, reached via Walker.u.pCCurHint.
*/
struct CCurHint {
  int iTabCur;    /* Cursor for the main table */
  int iIdxCur;    /* Cursor for the index, if pIdx!=0.  Unused otherwise */
  Index *pIdx;    /* The index used to access the table */
};
/*
** This function is called for every node of an expression that is a candidate
** for a cursor hint on an index cursor. For TK_COLUMN nodes that reference
** the table CCurHint.iTabCur, verify that the same column can be
** accessed through the index. If it cannot, then set pWalker->eCode to 1.
*/
static int codeCursorHintCheckExpr(Walker *pWalker, Expr *pExpr){
  struct CCurHint *pHint = pWalker->u.pCCurHint;
  assert( pHint->pIdx!=0 );
  /* Flag any column of the hinted table that the index cannot supply */
  if( pExpr->op==TK_COLUMN && pExpr->iTable==pHint->iTabCur ){
    if( sqlite3TableColumnToIndex(pHint->pIdx, pExpr->iColumn)<0 ){
      pWalker->eCode = 1;
    }
  }
  return WRC_Continue;
}
/*
** Test whether or not expression pExpr, which was part of a WHERE clause,
** should be included in the cursor-hint for a table that is on the rhs
** of a LEFT JOIN. Set Walker.eCode to non-zero before returning if the
** expression is not suitable.
**
** An expression is unsuitable if it might evaluate to non NULL even if
** a TK_COLUMN node that does affect the value of the expression is set
** to NULL. For example:
**
** col IS NULL
** col IS NOT NULL
** coalesce(col, 1)
** CASE WHEN col THEN 0 ELSE 1 END
*/
static int codeCursorHintIsOrFunction(Walker *pWalker, Expr *pExpr){
  switch( pExpr->op ){
    /* Operators that can yield non-NULL results from NULL column inputs */
    case TK_IS:
    case TK_ISNULL:
    case TK_ISNOT:
    case TK_NOTNULL:
    case TK_CASE:
      pWalker->eCode = 1;
      break;
    case TK_FUNCTION: {
      /* Any function other than like()/glob() is treated as unsuitable */
      int isNocase;
      char aWc[4];
      if( 0==sqlite3IsLikeFunction(pWalker->pParse->db, pExpr, &isNocase, aWc) ){
        pWalker->eCode = 1;
      }
      break;
    }
  }
  return WRC_Continue;
}
/*
** This function is called on every node of an expression tree used as an
** argument to the OP_CursorHint instruction. If the node is a TK_COLUMN
** that accesses any table other than the one identified by
** CCurHint.iTabCur, then do the following:
**
** 1) allocate a register and code an OP_Column instruction to read
** the specified column into the new register, and
**
** 2) transform the expression node to a TK_REGISTER node that reads
** from the newly populated register.
**
** Also, if the node is a TK_COLUMN that does access the table idenified
** by pCCurHint.iTabCur, and an index is being used (which we will
** know because CCurHint.pIdx!=0) then transform the TK_COLUMN into
** an access of the index rather than the original table.
*/
static int codeCursorHintFixExpr(Walker *pWalker, Expr *pExpr){
  struct CCurHint *pHint = pWalker->u.pCCurHint;

  if( pExpr->op==TK_AGG_FUNCTION ){
    /* An aggregate function in the WHERE clause of a query means this must
    ** be a correlated sub-query, and expression pExpr is an aggregate from
    ** the parent context. Do not walk the function arguments in this case.
    **
    ** todo: It should be possible to replace this node with a TK_REGISTER
    ** expression, as the result of the expression must be stored in a
    ** register at this point. The same holds for TK_AGG_COLUMN nodes. */
    return WRC_Prune;
  }
  if( pExpr->op==TK_COLUMN ){
    if( pExpr->iTable!=pHint->iTabCur ){
      /* Column of some other table: evaluate it once into a fresh
      ** register and rewrite the node to read that register. */
      int regVal = ++pWalker->pParse->nMem;
      sqlite3ExprCode(pWalker->pParse, pExpr, regVal);
      pExpr->op = TK_REGISTER;
      pExpr->iTable = regVal;
    }else if( pHint->pIdx!=0 ){
      /* Column of the hinted table: redirect through the index cursor */
      pExpr->iTable = pHint->iIdxCur;
      pExpr->iColumn = sqlite3TableColumnToIndex(pHint->pIdx, pExpr->iColumn);
      assert( pExpr->iColumn>=0 );
    }
  }
  return WRC_Continue;
}
/*
** Insert an OP_CursorHint instruction if it is appropriate to do so.
*/
static void codeCursorHint(
  struct SrcList_item *pTabItem,  /* FROM clause item */
  WhereInfo *pWInfo,    /* The where clause */
  WhereLevel *pLevel,   /* Which loop to provide hints for */
  WhereTerm *pEndRange  /* Hint this end-of-scan boundary term if not NULL */
){
  Parse *pParse = pWInfo->pParse;
  sqlite3 *db = pParse->db;
  Vdbe *v = pParse->pVdbe;
  Expr *pExpr = 0;      /* AND-conjunction of all hintable terms (dup'd) */
  WhereLoop *pLoop = pLevel->pWLoop;
  int iCur;
  WhereClause *pWC;
  WhereTerm *pTerm;
  int i, j;
  struct CCurHint sHint;
  Walker sWalker;

  if( OptimizationDisabled(db, SQLITE_CursorHints) ) return;
  iCur = pLevel->iTabCur;
  assert( iCur==pWInfo->pTabList->a[pLevel->iFrom].iCursor );
  sHint.iTabCur = iCur;
  sHint.iIdxCur = pLevel->iIdxCur;
  sHint.pIdx = pLoop->u.btree.pIndex;
  memset(&sWalker, 0, sizeof(sWalker));
  sWalker.pParse = pParse;
  sWalker.u.pCCurHint = &sHint;
  pWC = &pWInfo->sWC;

  /* Examine each WHERE term; accumulate the suitable ones into pExpr */
  for(i=0; i<pWC->nTerm; i++){
    pTerm = &pWC->a[i];
    if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
    if( pTerm->prereqAll & pLevel->notReady ) continue;

    /* Any terms specified as part of the ON(...) clause for any LEFT
    ** JOIN for which the current table is not the rhs are omitted
    ** from the cursor-hint.
    **
    ** If this table is the rhs of a LEFT JOIN, "IS" or "IS NULL" terms
    ** that were specified as part of the WHERE clause must be excluded.
    ** This is to address the following:
    **
    **   SELECT ... t1 LEFT JOIN t2 ON (t1.a=t2.b) WHERE t2.c IS NULL;
    **
    ** Say there is a single row in t2 that matches (t1.a=t2.b), but its
    ** t2.c values is not NULL. If the (t2.c IS NULL) constraint is
    ** pushed down to the cursor, this row is filtered out, causing
    ** SQLite to synthesize a row of NULL values. Which does match the
    ** WHERE clause, and so the query returns a row. Which is incorrect.
    **
    ** For the same reason, WHERE terms such as:
    **
    **   WHERE 1 = (t2.c IS NULL)
    **
    ** are also excluded. See codeCursorHintIsOrFunction() for details.
    */
    if( pTabItem->fg.jointype & JT_LEFT ){
      Expr *pExpr = pTerm->pExpr;  /* NOTE: intentionally shadows outer pExpr */
      if( !ExprHasProperty(pExpr, EP_FromJoin)
       || pExpr->iRightJoinTable!=pTabItem->iCursor
      ){
        sWalker.eCode = 0;
        sWalker.xExprCallback = codeCursorHintIsOrFunction;
        sqlite3WalkExpr(&sWalker, pTerm->pExpr);
        if( sWalker.eCode ) continue;
      }
    }else{
      if( ExprHasProperty(pTerm->pExpr, EP_FromJoin) ) continue;
    }

    /* All terms in pWLoop->aLTerm[] except pEndRange are used to initialize
    ** the cursor.  These terms are not needed as hints for a pure range
    ** scan (that has no == terms) so omit them. */
    if( pLoop->u.btree.nEq==0 && pTerm!=pEndRange ){
      for(j=0; j<pLoop->nLTerm && pLoop->aLTerm[j]!=pTerm; j++){}
      if( j<pLoop->nLTerm ) continue;
    }

    /* No subqueries or non-deterministic functions allowed */
    if( sqlite3ExprContainsSubquery(pTerm->pExpr) ) continue;

    /* For an index scan, make sure referenced columns are actually in
    ** the index. */
    if( sHint.pIdx!=0 ){
      sWalker.eCode = 0;
      sWalker.xExprCallback = codeCursorHintCheckExpr;
      sqlite3WalkExpr(&sWalker, pTerm->pExpr);
      if( sWalker.eCode ) continue;
    }

    /* If we survive all prior tests, that means this term is worth hinting */
    pExpr = sqlite3ExprAnd(pParse, pExpr, sqlite3ExprDup(db, pTerm->pExpr, 0));
  }
  if( pExpr!=0 ){
    /* Rewrite foreign-table columns to registers and hinted-table columns
    ** to index columns, then attach the expression to an OP_CursorHint. */
    sWalker.xExprCallback = codeCursorHintFixExpr;
    sqlite3WalkExpr(&sWalker, pExpr);
    sqlite3VdbeAddOp4(v, OP_CursorHint,
                      (sHint.pIdx ? sHint.iIdxCur : sHint.iTabCur), 0, 0,
                      (const char*)pExpr, P4_EXPR);
  }
}
#else
# define codeCursorHint(A,B,C,D) /* No-op */
#endif /* SQLITE_ENABLE_CURSOR_HINTS */
/*
** Cursor iCur is open on an intkey b-tree (a table). Register iRowid contains
** a rowid value just read from cursor iIdxCur, open on index pIdx. This
** function generates code to do a deferred seek of cursor iCur to the
** rowid stored in register iRowid.
**
** Normally, this is just:
**
** OP_DeferredSeek $iCur $iRowid
**
** However, if the scan currently being coded is a branch of an OR-loop and
** the statement currently being coded is a SELECT, then P3 of OP_DeferredSeek
** is set to iIdxCur and P4 is set to point to an array of integers
** containing one entry for each column of the table cursor iCur is open
** on. For each table column, if the column is the i'th column of the
** index, then the corresponding array entry is set to (i+1). If the column
** does not appear in the index at all, the array entry is set to 0.
*/
static void codeDeferredSeek(
  WhereInfo *pWInfo,              /* Where clause context */
  Index *pIdx,                    /* Index scan is using */
  int iCur,                       /* Cursor for IPK b-tree */
  int iIdxCur                     /* Index cursor */
){
  Parse *pParse = pWInfo->pParse; /* Parse context */
  Vdbe *v = pParse->pVdbe;        /* Vdbe to generate code within */

  assert( iIdxCur>0 );
  assert( pIdx->aiColumn[pIdx->nColumn-1]==-1 );

  sqlite3VdbeAddOp3(v, OP_DeferredSeek, iIdxCur, 0, iCur);
  /* For an OR-subclause scan in a read-only statement, also attach a
  ** table-column to index-column map so P3/P4 of OP_DeferredSeek can be
  ** used (see the header comment above). */
  if( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)
   && DbMaskAllZero(sqlite3ParseToplevel(pParse)->writeMask)
  ){
    int i;
    Table *pTab = pIdx->pTable;
    /* ai[0] holds the column count; ai[1..nCol] map each storage column
    ** of the table to 1-based index column number, or 0 if absent. */
    int *ai = (int*)sqlite3DbMallocZero(pParse->db, sizeof(int)*(pTab->nCol+1));
    if( ai ){
      ai[0] = pTab->nCol;
      for(i=0; i<pIdx->nColumn-1; i++){
        int x1, x2;
        assert( pIdx->aiColumn[i]<pTab->nCol );
        x1 = pIdx->aiColumn[i];                     /* Table column number */
        x2 = sqlite3TableColumnToStorage(pTab, x1); /* Storage slot number */
        testcase( x1!=x2 );
        if( x1>=0 ) ai[x2+1] = i+1;  /* Skip XN_EXPR/XN_ROWID entries (<0) */
      }
      sqlite3VdbeChangeP4(v, -1, (char*)ai, P4_INTARRAY);
    }
  }
}
/*
** If the expression passed as the second argument is a vector, generate
** code to write the first nReg elements of the vector into an array
** of registers starting with iReg.
**
** If the expression is not a vector, then nReg must be passed 1. In
** this case, generate code to evaluate the expression and leave the
** result in register iReg.
*/
static void codeExprOrVector(Parse *pParse, Expr *p, int iReg, int nReg){
  assert( nReg>0 );
  if( p && sqlite3ExprIsVector(p) ){
#ifndef SQLITE_OMIT_SUBQUERY
    if( (p->flags & EP_xIsSelect) ){
      /* Vector produced by a subquery: evaluate the subquery once and
      ** copy its first nReg result registers into iReg..iReg+nReg-1 */
      Vdbe *v = pParse->pVdbe;
      int iSelect;
      assert( p->op==TK_SELECT );
      iSelect = sqlite3CodeSubselect(pParse, p);
      sqlite3VdbeAddOp3(v, OP_Copy, iSelect, iReg, nReg-1);
    }else
#endif
    {
      /* Explicit vector (e1,e2,...): code the first nReg elements */
      int i;
      ExprList *pList = p->x.pList;
      assert( nReg<=pList->nExpr );
      for(i=0; i<nReg; i++){
        sqlite3ExprCode(pParse, pList->a[i].pExpr, iReg+i);
      }
    }
  }else{
    /* Scalar case: caller must have passed nReg==1 */
    assert( nReg==1 );
    sqlite3ExprCode(pParse, p, iReg);
  }
}
/* An instance of the IdxExprTrans object carries information about a
** mapping from an expression on table columns into a column in an index
** down through the Walker (via Walker.u.pIdxTrans).
*/
typedef struct IdxExprTrans {
  Expr *pIdxExpr;    /* The index expression being matched */
  int iTabCur;       /* The cursor of the corresponding table */
  int iIdxCur;       /* The cursor for the index */
  int iIdxCol;       /* The column for the index */
  int iTabCol;       /* The column for the table */
} IdxExprTrans;
/* The walker node callback used to transform matching expressions into
** a reference to an index column for an index on an expression.
**
** If pExpr matches, then transform it into a reference to the index column
** that contains the value of pExpr.
*/
static int whereIndexExprTransNode(Walker *p, Expr *pExpr){
  IdxExprTrans *pTrans = p->u.pIdxTrans;
  if( sqlite3ExprCompare(0, pExpr, pTrans->pIdxExpr, pTrans->iTabCur)!=0 ){
    return WRC_Continue;  /* Not a match for the indexed expression */
  }
  /* Capture the affinity of the original expression before rewriting the
  ** node in place as a reference to the index column. */
  pExpr->affExpr = sqlite3ExprAffinity(pExpr);
  pExpr->op = TK_COLUMN;
  pExpr->iTable = pTrans->iIdxCur;
  pExpr->iColumn = pTrans->iIdxCol;
  pExpr->y.pTab = 0;
  return WRC_Prune;  /* Subtree has been replaced; do not descend */
}
#ifndef SQLITE_OMIT_GENERATED_COLUMNS
/* A walker node callback that translates a column reference to a table
** into a corresponding column reference of an index.
*/
static int whereIndexExprTransColumn(Walker *p, Expr *pExpr){
  IdxExprTrans *pMap;
  if( pExpr->op!=TK_COLUMN ) return WRC_Continue;
  pMap = p->u.pIdxTrans;
  /* Retarget references to the mapped table column at the index column */
  if( pExpr->iTable==pMap->iTabCur && pExpr->iColumn==pMap->iTabCol ){
    pExpr->iTable = pMap->iIdxCur;
    pExpr->iColumn = pMap->iIdxCol;
    pExpr->y.pTab = 0;
  }
  return WRC_Continue;
}
#endif /* SQLITE_OMIT_GENERATED_COLUMNS */
/*
** For an indexes on expression X, locate every instance of expression X
** in pExpr and change that subexpression into a reference to the appropriate
** column of the index.
**
** 2019-10-24: Updated to also translate references to a VIRTUAL column in
** the table into references to the corresponding (stored) column of the
** index.
*/
static void whereIndexExprTrans(
  Index *pIdx,      /* The Index */
  int iTabCur,      /* Cursor of the table that is being indexed */
  int iIdxCur,      /* Cursor of the index itself */
  WhereInfo *pWInfo /* Transform expressions in this WHERE clause */
){
  int iIdxCol;               /* Column number of the index */
  ExprList *aColExpr;        /* Expressions that are indexed */
  Table *pTab;
  Walker w;
  IdxExprTrans x;

  aColExpr = pIdx->aColExpr;
  if( aColExpr==0 && !pIdx->bHasVCol ){
    /* The index does not reference any expressions or virtual columns
    ** so no translations are needed. */
    return;
  }
  pTab = pIdx->pTable;
  memset(&w, 0, sizeof(w));
  w.u.pIdxTrans = &x;
  x.iTabCur = iTabCur;
  x.iIdxCur = iIdxCur;

  /* For each index column, pick the appropriate walker callback and then
  ** translate matching nodes throughout WHERE, ORDER BY, and result set. */
  for(iIdxCol=0; iIdxCol<pIdx->nColumn; iIdxCol++){
    i16 iRef = pIdx->aiColumn[iIdxCol];
    if( iRef==XN_EXPR ){
      /* Indexed expression: match whole subexpressions */
      assert( aColExpr->a[iIdxCol].pExpr!=0 );
      x.pIdxExpr = aColExpr->a[iIdxCol].pExpr;
      w.xExprCallback = whereIndexExprTransNode;
#ifndef SQLITE_OMIT_GENERATED_COLUMNS
    }else if( iRef>=0 && (pTab->aCol[iRef].colFlags & COLFLAG_VIRTUAL)!=0 ){
      /* Indexed VIRTUAL column: match plain column references */
      x.iTabCol = iRef;
      w.xExprCallback = whereIndexExprTransColumn;
#endif /* SQLITE_OMIT_GENERATED_COLUMNS */
    }else{
      continue;  /* Ordinary stored column: no translation needed */
    }
    x.iIdxCol = iIdxCol;
    sqlite3WalkExpr(&w, pWInfo->pWhere);
    sqlite3WalkExprList(&w, pWInfo->pOrderBy);
    sqlite3WalkExprList(&w, pWInfo->pResultSet);
  }
}
/*
** The pTruth expression is always true because it is the WHERE clause
** of a partial index that is driving a query loop.  Look through all of
** the WHERE clause terms on the query, and if any of those terms must be
** true because pTruth is true, then mark those WHERE clause terms as
** coded so that they are not evaluated redundantly.
*/
static void whereApplyPartialIndexConstraints(
  Expr *pTruth,                   /* A known-true expression (partial-index WHERE) */
  int iTabCur,                    /* Cursor of the table covered by the index */
  WhereClause *pWC                /* WHERE clause terms of the query */
){
  int ii;                         /* Term counter */

  /* pTruth may be a conjunction: process each conjunct independently,
  ** recursing on the left operand and iterating down the right. */
  while( pTruth->op==TK_AND ){
    whereApplyPartialIndexConstraints(pTruth->pLeft, iTabCur, pWC);
    pTruth = pTruth->pRight;
  }
  for(ii=0; ii<pWC->nTerm; ii++){
    WhereTerm *pT = &pWC->a[ii];
    if( (pT->wtFlags & TERM_CODED)!=0 ) continue;  /* Already handled */
    if( sqlite3ExprCompare(0, pT->pExpr, pTruth, iTabCur)==0 ){
      /* Term is equivalent to pTruth, hence always true: mark as coded */
      pT->wtFlags |= TERM_CODED;
    }
  }
}
/*
** Generate code for the start of the iLevel-th loop in the WHERE clause
** implementation described by pWInfo.
*/
Bitmask sqlite3WhereCodeOneLoopStart(
Parse *pParse, /* Parsing context */
Vdbe *v, /* Prepared statement under construction */
WhereInfo *pWInfo, /* Complete information about the WHERE clause */
int iLevel, /* Which level of pWInfo->a[] should be coded */
WhereLevel *pLevel, /* The current level pointer */
Bitmask notReady /* Which tables are currently available */
){
int j, k; /* Loop counters */
int iCur; /* The VDBE cursor for the table */
int addrNxt; /* Where to jump to continue with the next IN case */
int bRev; /* True if we need to scan in reverse order */
WhereLoop *pLoop; /* The WhereLoop object being coded */
WhereClause *pWC; /* Decomposition of the entire WHERE clause */
WhereTerm *pTerm; /* A WHERE clause term */
sqlite3 *db; /* Database connection */
struct SrcList_item *pTabItem; /* FROM clause term being coded */
int addrBrk; /* Jump here to break out of the loop */
int addrHalt; /* addrBrk for the outermost loop */
int addrCont; /* Jump here to continue with next cycle */
int iRowidReg = 0; /* Rowid is stored in this register, if not zero */
int iReleaseReg = 0; /* Temp register to free before returning */
Index *pIdx = 0; /* Index used by loop (if any) */
int iLoop; /* Iteration of constraint generator loop */
pWC = &pWInfo->sWC;
db = pParse->db;
pLoop = pLevel->pWLoop;
pTabItem = &pWInfo->pTabList->a[pLevel->iFrom];
iCur = pTabItem->iCursor;
pLevel->notReady = notReady & ~sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur);
bRev = (pWInfo->revMask>>iLevel)&1;
VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName));
/* Create labels for the "break" and "continue" instructions
** for the current loop. Jump to addrBrk to break out of a loop.
** Jump to cont to go immediately to the next iteration of the
** loop.
**
** When there is an IN operator, we also have a "addrNxt" label that
** means to continue with the next IN value combination. When
** there are no IN operators in the constraints, the "addrNxt" label
** is the same as "addrBrk".
*/
addrBrk = pLevel->addrBrk = pLevel->addrNxt = sqlite3VdbeMakeLabel(pParse);
addrCont = pLevel->addrCont = sqlite3VdbeMakeLabel(pParse);
/* If this is the right table of a LEFT OUTER JOIN, allocate and
** initialize a memory cell that records if this table matches any
** row of the left table of the join.
*/
assert( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)
|| pLevel->iFrom>0 || (pTabItem[0].fg.jointype & JT_LEFT)==0
);
if( pLevel->iFrom>0 && (pTabItem[0].fg.jointype & JT_LEFT)!=0 ){
pLevel->iLeftJoin = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_Integer, 0, pLevel->iLeftJoin);
VdbeComment((v, "init LEFT JOIN no-match flag"));
}
/* Compute a safe address to jump to if we discover that the table for
** this loop is empty and can never contribute content. */
for(j=iLevel; j>0 && pWInfo->a[j].iLeftJoin==0; j--){}
addrHalt = pWInfo->a[j].addrBrk;
/* Special case of a FROM clause subquery implemented as a co-routine */
if( pTabItem->fg.viaCoroutine ){
int regYield = pTabItem->regReturn;
sqlite3VdbeAddOp3(v, OP_InitCoroutine, regYield, 0, pTabItem->addrFillSub);
pLevel->p2 = sqlite3VdbeAddOp2(v, OP_Yield, regYield, addrBrk);
VdbeCoverage(v);
VdbeComment((v, "next row of %s", pTabItem->pTab->zName));
pLevel->op = OP_Goto;
}else
#ifndef SQLITE_OMIT_VIRTUALTABLE
if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)!=0 ){
/* Case 1: The table is a virtual-table. Use the VFilter and VNext
** to access the data.
*/
int iReg; /* P3 Value for OP_VFilter */
int addrNotFound;
int nConstraint = pLoop->nLTerm;
int iIn; /* Counter for IN constraints */
iReg = sqlite3GetTempRange(pParse, nConstraint+2);
addrNotFound = pLevel->addrBrk;
for(j=0; j<nConstraint; j++){
int iTarget = iReg+j+2;
pTerm = pLoop->aLTerm[j];
if( NEVER(pTerm==0) ) continue;
if( pTerm->eOperator & WO_IN ){
codeEqualityTerm(pParse, pTerm, pLevel, j, bRev, iTarget);
addrNotFound = pLevel->addrNxt;
}else{
Expr *pRight = pTerm->pExpr->pRight;
codeExprOrVector(pParse, pRight, iTarget, 1);
}
}
sqlite3VdbeAddOp2(v, OP_Integer, pLoop->u.vtab.idxNum, iReg);
sqlite3VdbeAddOp2(v, OP_Integer, nConstraint, iReg+1);
sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrNotFound, iReg,
pLoop->u.vtab.idxStr,
pLoop->u.vtab.needFree ? P4_DYNAMIC : P4_STATIC);
VdbeCoverage(v);
pLoop->u.vtab.needFree = 0;
pLevel->p1 = iCur;
pLevel->op = pWInfo->eOnePass ? OP_Noop : OP_VNext;
pLevel->p2 = sqlite3VdbeCurrentAddr(v);
iIn = pLevel->u.in.nIn;
for(j=nConstraint-1; j>=0; j--){
pTerm = pLoop->aLTerm[j];
if( (pTerm->eOperator & WO_IN)!=0 ) iIn--;
if( j<16 && (pLoop->u.vtab.omitMask>>j)&1 ){
disableTerm(pLevel, pTerm);
}else if( (pTerm->eOperator & WO_IN)!=0
&& sqlite3ExprVectorSize(pTerm->pExpr->pLeft)==1
){
Expr *pCompare; /* The comparison operator */
Expr *pRight; /* RHS of the comparison */
VdbeOp *pOp; /* Opcode to access the value of the IN constraint */
/* Reload the constraint value into reg[iReg+j+2]. The same value
** was loaded into the same register prior to the OP_VFilter, but
** the xFilter implementation might have changed the datatype or
** encoding of the value in the register, so it *must* be reloaded. */
assert( pLevel->u.in.aInLoop!=0 || db->mallocFailed );
if( !db->mallocFailed ){
assert( iIn>=0 && iIn<pLevel->u.in.nIn );
pOp = sqlite3VdbeGetOp(v, pLevel->u.in.aInLoop[iIn].addrInTop);
assert( pOp->opcode==OP_Column || pOp->opcode==OP_Rowid );
assert( pOp->opcode!=OP_Column || pOp->p3==iReg+j+2 );
assert( pOp->opcode!=OP_Rowid || pOp->p2==iReg+j+2 );
testcase( pOp->opcode==OP_Rowid );
sqlite3VdbeAddOp3(v, pOp->opcode, pOp->p1, pOp->p2, pOp->p3);
}
/* Generate code that will continue to the next row if
** the IN constraint is not satisfied */
pCompare = sqlite3PExpr(pParse, TK_EQ, 0, 0);
assert( pCompare!=0 || db->mallocFailed );
if( pCompare ){
pCompare->pLeft = pTerm->pExpr->pLeft;
pCompare->pRight = pRight = sqlite3Expr(db, TK_REGISTER, 0);
if( pRight ){
pRight->iTable = iReg+j+2;
sqlite3ExprIfFalse(pParse, pCompare, pLevel->addrCont, 0);
}
pCompare->pLeft = 0;
sqlite3ExprDelete(db, pCompare);
}
}
}
assert( iIn==0 || db->mallocFailed );
/* These registers need to be preserved in case there is an IN operator
** loop. So we could deallocate the registers here (and potentially
** reuse them later) if (pLoop->wsFlags & WHERE_IN_ABLE)==0. But it seems
** simpler and safer to simply not reuse the registers.
**
** sqlite3ReleaseTempRange(pParse, iReg, nConstraint+2);
*/
}else
#endif /* SQLITE_OMIT_VIRTUALTABLE */
if( (pLoop->wsFlags & WHERE_IPK)!=0
&& (pLoop->wsFlags & (WHERE_COLUMN_IN|WHERE_COLUMN_EQ))!=0
){
/* Case 2: We can directly reference a single row using an
** equality comparison against the ROWID field. Or
** we reference multiple rows using a "rowid IN (...)"
** construct.
*/
assert( pLoop->u.btree.nEq==1 );
pTerm = pLoop->aLTerm[0];
assert( pTerm!=0 );
assert( pTerm->pExpr!=0 );
testcase( pTerm->wtFlags & TERM_VIRTUAL );
iReleaseReg = ++pParse->nMem;
iRowidReg = codeEqualityTerm(pParse, pTerm, pLevel, 0, bRev, iReleaseReg);
if( iRowidReg!=iReleaseReg ) sqlite3ReleaseTempReg(pParse, iReleaseReg);
addrNxt = pLevel->addrNxt;
sqlite3VdbeAddOp3(v, OP_SeekRowid, iCur, addrNxt, iRowidReg);
VdbeCoverage(v);
pLevel->op = OP_Noop;
if( (pTerm->prereqAll & pLevel->notReady)==0 ){
pTerm->wtFlags |= TERM_CODED;
}
}else if( (pLoop->wsFlags & WHERE_IPK)!=0
&& (pLoop->wsFlags & WHERE_COLUMN_RANGE)!=0
){
/* Case 3: We have an inequality comparison against the ROWID field.
*/
int testOp = OP_Noop;
int start;
int memEndValue = 0;
WhereTerm *pStart, *pEnd;
j = 0;
pStart = pEnd = 0;
if( pLoop->wsFlags & WHERE_BTM_LIMIT ) pStart = pLoop->aLTerm[j++];
if( pLoop->wsFlags & WHERE_TOP_LIMIT ) pEnd = pLoop->aLTerm[j++];
assert( pStart!=0 || pEnd!=0 );
if( bRev ){
pTerm = pStart;
pStart = pEnd;
pEnd = pTerm;
}
codeCursorHint(pTabItem, pWInfo, pLevel, pEnd);
if( pStart ){
Expr *pX; /* The expression that defines the start bound */
int r1, rTemp; /* Registers for holding the start boundary */
int op; /* Cursor seek operation */
/* The following constant maps TK_xx codes into corresponding
** seek opcodes. It depends on a particular ordering of TK_xx
*/
const u8 aMoveOp[] = {
/* TK_GT */ OP_SeekGT,
/* TK_LE */ OP_SeekLE,
/* TK_LT */ OP_SeekLT,
/* TK_GE */ OP_SeekGE
};
assert( TK_LE==TK_GT+1 ); /* Make sure the ordering.. */
assert( TK_LT==TK_GT+2 ); /* ... of the TK_xx values... */
    assert( TK_GE==TK_GT+3 );    /* ... is correct. */
assert( (pStart->wtFlags & TERM_VNULL)==0 );
testcase( pStart->wtFlags & TERM_VIRTUAL );
pX = pStart->pExpr;
assert( pX!=0 );
testcase( pStart->leftCursor!=iCur ); /* transitive constraints */
if( sqlite3ExprIsVector(pX->pRight) ){
r1 = rTemp = sqlite3GetTempReg(pParse);
codeExprOrVector(pParse, pX->pRight, r1, 1);
testcase( pX->op==TK_GT );
testcase( pX->op==TK_GE );
testcase( pX->op==TK_LT );
testcase( pX->op==TK_LE );
op = aMoveOp[((pX->op - TK_GT - 1) & 0x3) | 0x1];
assert( pX->op!=TK_GT || op==OP_SeekGE );
assert( pX->op!=TK_GE || op==OP_SeekGE );
assert( pX->op!=TK_LT || op==OP_SeekLE );
assert( pX->op!=TK_LE || op==OP_SeekLE );
}else{
r1 = sqlite3ExprCodeTemp(pParse, pX->pRight, &rTemp);
disableTerm(pLevel, pStart);
op = aMoveOp[(pX->op - TK_GT)];
}
sqlite3VdbeAddOp3(v, op, iCur, addrBrk, r1);
VdbeComment((v, "pk"));
VdbeCoverageIf(v, pX->op==TK_GT);
VdbeCoverageIf(v, pX->op==TK_LE);
VdbeCoverageIf(v, pX->op==TK_LT);
VdbeCoverageIf(v, pX->op==TK_GE);
sqlite3ReleaseTempReg(pParse, rTemp);
}else{
sqlite3VdbeAddOp2(v, bRev ? OP_Last : OP_Rewind, iCur, addrHalt);
VdbeCoverageIf(v, bRev==0);
VdbeCoverageIf(v, bRev!=0);
}
if( pEnd ){
Expr *pX;
pX = pEnd->pExpr;
assert( pX!=0 );
assert( (pEnd->wtFlags & TERM_VNULL)==0 );
testcase( pEnd->leftCursor!=iCur ); /* Transitive constraints */
testcase( pEnd->wtFlags & TERM_VIRTUAL );
memEndValue = ++pParse->nMem;
codeExprOrVector(pParse, pX->pRight, memEndValue, 1);
if( 0==sqlite3ExprIsVector(pX->pRight)
&& (pX->op==TK_LT || pX->op==TK_GT)
){
testOp = bRev ? OP_Le : OP_Ge;
}else{
testOp = bRev ? OP_Lt : OP_Gt;
}
if( 0==sqlite3ExprIsVector(pX->pRight) ){
disableTerm(pLevel, pEnd);
}
}
start = sqlite3VdbeCurrentAddr(v);
pLevel->op = bRev ? OP_Prev : OP_Next;
pLevel->p1 = iCur;
pLevel->p2 = start;
assert( pLevel->p5==0 );
if( testOp!=OP_Noop ){
iRowidReg = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_Rowid, iCur, iRowidReg);
sqlite3VdbeAddOp3(v, testOp, memEndValue, addrBrk, iRowidReg);
VdbeCoverageIf(v, testOp==OP_Le);
VdbeCoverageIf(v, testOp==OP_Lt);
VdbeCoverageIf(v, testOp==OP_Ge);
VdbeCoverageIf(v, testOp==OP_Gt);
sqlite3VdbeChangeP5(v, SQLITE_AFF_NUMERIC | SQLITE_JUMPIFNULL);
}
}else if( pLoop->wsFlags & WHERE_INDEXED ){
/* Case 4: A scan using an index.
**
** The WHERE clause may contain zero or more equality
** terms ("==" or "IN" operators) that refer to the N
** left-most columns of the index. It may also contain
** inequality constraints (>, <, >= or <=) on the indexed
** column that immediately follows the N equalities. Only
** the right-most column can be an inequality - the rest must
** use the "==" and "IN" operators. For example, if the
** index is on (x,y,z), then the following clauses are all
** optimized:
**
** x=5
** x=5 AND y=10
** x=5 AND y<10
** x=5 AND y>5 AND y<10
** x=5 AND y=5 AND z<=10
**
** The z<10 term of the following cannot be used, only
** the x=5 term:
**
** x=5 AND z<10
**
** N may be zero if there are inequality constraints.
** If there are no inequality constraints, then N is at
** least one.
**
** This case is also used when there are no WHERE clause
** constraints but an index is selected anyway, in order
** to force the output order to conform to an ORDER BY.
*/
static const u8 aStartOp[] = {
0,
0,
OP_Rewind, /* 2: (!start_constraints && startEq && !bRev) */
OP_Last, /* 3: (!start_constraints && startEq && bRev) */
OP_SeekGT, /* 4: (start_constraints && !startEq && !bRev) */
OP_SeekLT, /* 5: (start_constraints && !startEq && bRev) */
OP_SeekGE, /* 6: (start_constraints && startEq && !bRev) */
OP_SeekLE /* 7: (start_constraints && startEq && bRev) */
};
static const u8 aEndOp[] = {
OP_IdxGE, /* 0: (end_constraints && !bRev && !endEq) */
OP_IdxGT, /* 1: (end_constraints && !bRev && endEq) */
OP_IdxLE, /* 2: (end_constraints && bRev && !endEq) */
OP_IdxLT, /* 3: (end_constraints && bRev && endEq) */
};
u16 nEq = pLoop->u.btree.nEq; /* Number of == or IN terms */
u16 nBtm = pLoop->u.btree.nBtm; /* Length of BTM vector */
u16 nTop = pLoop->u.btree.nTop; /* Length of TOP vector */
int regBase; /* Base register holding constraint values */
WhereTerm *pRangeStart = 0; /* Inequality constraint at range start */
WhereTerm *pRangeEnd = 0; /* Inequality constraint at range end */
int startEq; /* True if range start uses ==, >= or <= */
int endEq; /* True if range end uses ==, >= or <= */
int start_constraints; /* Start of range is constrained */
int nConstraint; /* Number of constraint terms */
int iIdxCur; /* The VDBE cursor for the index */
int nExtraReg = 0; /* Number of extra registers needed */
int op; /* Instruction opcode */
char *zStartAff; /* Affinity for start of range constraint */
char *zEndAff = 0; /* Affinity for end of range constraint */
u8 bSeekPastNull = 0; /* True to seek past initial nulls */
u8 bStopAtNull = 0; /* Add condition to terminate at NULLs */
int omitTable; /* True if we use the index only */
int regBignull = 0; /* big-null flag register */
pIdx = pLoop->u.btree.pIndex;
iIdxCur = pLevel->iIdxCur;
assert( nEq>=pLoop->nSkip );
/* Find any inequality constraint terms for the start and end
** of the range.
*/
j = nEq;
if( pLoop->wsFlags & WHERE_BTM_LIMIT ){
pRangeStart = pLoop->aLTerm[j++];
nExtraReg = MAX(nExtraReg, pLoop->u.btree.nBtm);
/* Like optimization range constraints always occur in pairs */
assert( (pRangeStart->wtFlags & TERM_LIKEOPT)==0 ||
(pLoop->wsFlags & WHERE_TOP_LIMIT)!=0 );
}
if( pLoop->wsFlags & WHERE_TOP_LIMIT ){
pRangeEnd = pLoop->aLTerm[j++];
nExtraReg = MAX(nExtraReg, pLoop->u.btree.nTop);
#ifndef SQLITE_LIKE_DOESNT_MATCH_BLOBS
if( (pRangeEnd->wtFlags & TERM_LIKEOPT)!=0 ){
assert( pRangeStart!=0 ); /* LIKE opt constraints */
assert( pRangeStart->wtFlags & TERM_LIKEOPT ); /* occur in pairs */
pLevel->iLikeRepCntr = (u32)++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_Integer, 1, (int)pLevel->iLikeRepCntr);
VdbeComment((v, "LIKE loop counter"));
pLevel->addrLikeRep = sqlite3VdbeCurrentAddr(v);
/* iLikeRepCntr actually stores 2x the counter register number. The
** bottom bit indicates whether the search order is ASC or DESC. */
testcase( bRev );
testcase( pIdx->aSortOrder[nEq]==SQLITE_SO_DESC );
assert( (bRev & ~1)==0 );
pLevel->iLikeRepCntr <<=1;
pLevel->iLikeRepCntr |= bRev ^ (pIdx->aSortOrder[nEq]==SQLITE_SO_DESC);
}
#endif
if( pRangeStart==0 ){
j = pIdx->aiColumn[nEq];
if( (j>=0 && pIdx->pTable->aCol[j].notNull==0) || j==XN_EXPR ){
bSeekPastNull = 1;
}
}
}
assert( pRangeEnd==0 || (pRangeEnd->wtFlags & TERM_VNULL)==0 );
/* If the WHERE_BIGNULL_SORT flag is set, then index column nEq uses
** a non-default "big-null" sort (either ASC NULLS LAST or DESC NULLS
** FIRST). In both cases separate ordered scans are made of those
** index entries for which the column is null and for those for which
** it is not. For an ASC sort, the non-NULL entries are scanned first.
** For DESC, NULL entries are scanned first.
*/
if( (pLoop->wsFlags & (WHERE_TOP_LIMIT|WHERE_BTM_LIMIT))==0
&& (pLoop->wsFlags & WHERE_BIGNULL_SORT)!=0
){
assert( bSeekPastNull==0 && nExtraReg==0 && nBtm==0 && nTop==0 );
assert( pRangeEnd==0 && pRangeStart==0 );
assert( pLoop->nSkip==0 );
nExtraReg = 1;
bSeekPastNull = 1;
pLevel->regBignull = regBignull = ++pParse->nMem;
pLevel->addrBignull = sqlite3VdbeMakeLabel(pParse);
}
/* If we are doing a reverse order scan on an ascending index, or
** a forward order scan on a descending index, interchange the
** start and end terms (pRangeStart and pRangeEnd).
*/
if( (nEq<pIdx->nKeyCol && bRev==(pIdx->aSortOrder[nEq]==SQLITE_SO_ASC))
|| (bRev && pIdx->nKeyCol==nEq)
){
SWAP(WhereTerm *, pRangeEnd, pRangeStart);
SWAP(u8, bSeekPastNull, bStopAtNull);
SWAP(u8, nBtm, nTop);
}
/* Generate code to evaluate all constraint terms using == or IN
** and store the values of those terms in an array of registers
** starting at regBase.
*/
codeCursorHint(pTabItem, pWInfo, pLevel, pRangeEnd);
regBase = codeAllEqualityTerms(pParse,pLevel,bRev,nExtraReg,&zStartAff);
assert( zStartAff==0 || sqlite3Strlen30(zStartAff)>=nEq );
if( zStartAff && nTop ){
zEndAff = sqlite3DbStrDup(db, &zStartAff[nEq]);
}
addrNxt = (regBignull ? pLevel->addrBignull : pLevel->addrNxt);
testcase( pRangeStart && (pRangeStart->eOperator & WO_LE)!=0 );
testcase( pRangeStart && (pRangeStart->eOperator & WO_GE)!=0 );
testcase( pRangeEnd && (pRangeEnd->eOperator & WO_LE)!=0 );
testcase( pRangeEnd && (pRangeEnd->eOperator & WO_GE)!=0 );
startEq = !pRangeStart || pRangeStart->eOperator & (WO_LE|WO_GE);
endEq = !pRangeEnd || pRangeEnd->eOperator & (WO_LE|WO_GE);
start_constraints = pRangeStart || nEq>0;
/* Seek the index cursor to the start of the range. */
nConstraint = nEq;
if( pRangeStart ){
Expr *pRight = pRangeStart->pExpr->pRight;
codeExprOrVector(pParse, pRight, regBase+nEq, nBtm);
whereLikeOptimizationStringFixup(v, pLevel, pRangeStart);
if( (pRangeStart->wtFlags & TERM_VNULL)==0
&& sqlite3ExprCanBeNull(pRight)
){
sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt);
VdbeCoverage(v);
}
if( zStartAff ){
updateRangeAffinityStr(pRight, nBtm, &zStartAff[nEq]);
}
nConstraint += nBtm;
testcase( pRangeStart->wtFlags & TERM_VIRTUAL );
if( sqlite3ExprIsVector(pRight)==0 ){
disableTerm(pLevel, pRangeStart);
}else{
startEq = 1;
}
bSeekPastNull = 0;
}else if( bSeekPastNull ){
startEq = 0;
sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
start_constraints = 1;
nConstraint++;
}else if( regBignull ){
sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
start_constraints = 1;
nConstraint++;
}
codeApplyAffinity(pParse, regBase, nConstraint - bSeekPastNull, zStartAff);
if( pLoop->nSkip>0 && nConstraint==pLoop->nSkip ){
/* The skip-scan logic inside the call to codeAllEqualityConstraints()
** above has already left the cursor sitting on the correct row,
** so no further seeking is needed */
}else{
if( pLoop->wsFlags & WHERE_IN_EARLYOUT ){
sqlite3VdbeAddOp1(v, OP_SeekHit, iIdxCur);
}
if( regBignull ){
sqlite3VdbeAddOp2(v, OP_Integer, 1, regBignull);
VdbeComment((v, "NULL-scan pass ctr"));
}
op = aStartOp[(start_constraints<<2) + (startEq<<1) + bRev];
assert( op!=0 );
sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
VdbeCoverage(v);
VdbeCoverageIf(v, op==OP_Rewind); testcase( op==OP_Rewind );
VdbeCoverageIf(v, op==OP_Last); testcase( op==OP_Last );
VdbeCoverageIf(v, op==OP_SeekGT); testcase( op==OP_SeekGT );
VdbeCoverageIf(v, op==OP_SeekGE); testcase( op==OP_SeekGE );
VdbeCoverageIf(v, op==OP_SeekLE); testcase( op==OP_SeekLE );
VdbeCoverageIf(v, op==OP_SeekLT); testcase( op==OP_SeekLT );
assert( bSeekPastNull==0 || bStopAtNull==0 );
if( regBignull ){
assert( bSeekPastNull==1 || bStopAtNull==1 );
assert( bSeekPastNull==!bStopAtNull );
assert( bStopAtNull==startEq );
sqlite3VdbeAddOp2(v, OP_Goto, 0, sqlite3VdbeCurrentAddr(v)+2);
op = aStartOp[(nConstraint>1)*4 + 2 + bRev];
sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase,
nConstraint-startEq);
VdbeCoverage(v);
VdbeCoverageIf(v, op==OP_Rewind); testcase( op==OP_Rewind );
VdbeCoverageIf(v, op==OP_Last); testcase( op==OP_Last );
VdbeCoverageIf(v, op==OP_SeekGE); testcase( op==OP_SeekGE );
VdbeCoverageIf(v, op==OP_SeekLE); testcase( op==OP_SeekLE );
assert( op==OP_Rewind || op==OP_Last || op==OP_SeekGE || op==OP_SeekLE);
}
}
/* Load the value for the inequality constraint at the end of the
** range (if any).
*/
nConstraint = nEq;
if( pRangeEnd ){
Expr *pRight = pRangeEnd->pExpr->pRight;
codeExprOrVector(pParse, pRight, regBase+nEq, nTop);
whereLikeOptimizationStringFixup(v, pLevel, pRangeEnd);
if( (pRangeEnd->wtFlags & TERM_VNULL)==0
&& sqlite3ExprCanBeNull(pRight)
){
sqlite3VdbeAddOp2(v, OP_IsNull, regBase+nEq, addrNxt);
VdbeCoverage(v);
}
if( zEndAff ){
updateRangeAffinityStr(pRight, nTop, zEndAff);
codeApplyAffinity(pParse, regBase+nEq, nTop, zEndAff);
}else{
assert( pParse->db->mallocFailed );
}
nConstraint += nTop;
testcase( pRangeEnd->wtFlags & TERM_VIRTUAL );
if( sqlite3ExprIsVector(pRight)==0 ){
disableTerm(pLevel, pRangeEnd);
}else{
endEq = 1;
}
}else if( bStopAtNull ){
if( regBignull==0 ){
sqlite3VdbeAddOp2(v, OP_Null, 0, regBase+nEq);
endEq = 0;
}
nConstraint++;
}
sqlite3DbFree(db, zStartAff);
sqlite3DbFree(db, zEndAff);
/* Top of the loop body */
pLevel->p2 = sqlite3VdbeCurrentAddr(v);
/* Check if the index cursor is past the end of the range. */
if( nConstraint ){
if( regBignull ){
/* Except, skip the end-of-range check while doing the NULL-scan */
sqlite3VdbeAddOp2(v, OP_IfNot, regBignull, sqlite3VdbeCurrentAddr(v)+3);
VdbeComment((v, "If NULL-scan 2nd pass"));
VdbeCoverage(v);
}
op = aEndOp[bRev*2 + endEq];
sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase, nConstraint);
testcase( op==OP_IdxGT ); VdbeCoverageIf(v, op==OP_IdxGT );
testcase( op==OP_IdxGE ); VdbeCoverageIf(v, op==OP_IdxGE );
testcase( op==OP_IdxLT ); VdbeCoverageIf(v, op==OP_IdxLT );
testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE );
}
if( regBignull ){
/* During a NULL-scan, check to see if we have reached the end of
** the NULLs */
assert( bSeekPastNull==!bStopAtNull );
assert( bSeekPastNull+bStopAtNull==1 );
assert( nConstraint+bSeekPastNull>0 );
sqlite3VdbeAddOp2(v, OP_If, regBignull, sqlite3VdbeCurrentAddr(v)+2);
VdbeComment((v, "If NULL-scan 1st pass"));
VdbeCoverage(v);
op = aEndOp[bRev*2 + bSeekPastNull];
sqlite3VdbeAddOp4Int(v, op, iIdxCur, addrNxt, regBase,
nConstraint+bSeekPastNull);
testcase( op==OP_IdxGT ); VdbeCoverageIf(v, op==OP_IdxGT );
testcase( op==OP_IdxGE ); VdbeCoverageIf(v, op==OP_IdxGE );
testcase( op==OP_IdxLT ); VdbeCoverageIf(v, op==OP_IdxLT );
testcase( op==OP_IdxLE ); VdbeCoverageIf(v, op==OP_IdxLE );
}
if( pLoop->wsFlags & WHERE_IN_EARLYOUT ){
sqlite3VdbeAddOp2(v, OP_SeekHit, iIdxCur, 1);
}
/* Seek the table cursor, if required */
omitTable = (pLoop->wsFlags & WHERE_IDX_ONLY)!=0
&& (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0;
if( omitTable ){
/* pIdx is a covering index. No need to access the main table. */
}else if( HasRowid(pIdx->pTable) ){
if( (pWInfo->wctrlFlags & WHERE_SEEK_TABLE) || (
(pWInfo->wctrlFlags & WHERE_SEEK_UNIQ_TABLE)
&& (pWInfo->eOnePass==ONEPASS_SINGLE)
)){
iRowidReg = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg);
sqlite3VdbeAddOp3(v, OP_NotExists, iCur, 0, iRowidReg);
VdbeCoverage(v);
}else{
codeDeferredSeek(pWInfo, pIdx, iCur, iIdxCur);
}
}else if( iCur!=iIdxCur ){
Index *pPk = sqlite3PrimaryKeyIndex(pIdx->pTable);
iRowidReg = sqlite3GetTempRange(pParse, pPk->nKeyCol);
for(j=0; j<pPk->nKeyCol; j++){
k = sqlite3TableColumnToIndex(pIdx, pPk->aiColumn[j]);
sqlite3VdbeAddOp3(v, OP_Column, iIdxCur, k, iRowidReg+j);
}
sqlite3VdbeAddOp4Int(v, OP_NotFound, iCur, addrCont,
iRowidReg, pPk->nKeyCol); VdbeCoverage(v);
}
if( pLevel->iLeftJoin==0 ){
/* If pIdx is an index on one or more expressions, then look through
** all the expressions in pWInfo and try to transform matching expressions
** into reference to index columns. Also attempt to translate references
** to virtual columns in the table into references to (stored) columns
** of the index.
**
** Do not do this for the RHS of a LEFT JOIN. This is because the
** expression may be evaluated after OP_NullRow has been executed on
** the cursor. In this case it is important to do the full evaluation,
** as the result of the expression may not be NULL, even if all table
** column values are. https://www.sqlite.org/src/info/7fa8049685b50b5a
**
      ** Also, do not do this when processing one index of a multi-index
** OR clause, since the transformation will become invalid once we
** move forward to the next index.
** https://sqlite.org/src/info/4e8e4857d32d401f
*/
if( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0 ){
whereIndexExprTrans(pIdx, iCur, iIdxCur, pWInfo);
}
/* If a partial index is driving the loop, try to eliminate WHERE clause
** terms from the query that must be true due to the WHERE clause of
** the partial index.
**
** 2019-11-02 ticket 623eff57e76d45f6: This optimization does not work
** for a LEFT JOIN.
*/
if( pIdx->pPartIdxWhere ){
whereApplyPartialIndexConstraints(pIdx->pPartIdxWhere, iCur, pWC);
}
}else{
testcase( pIdx->pPartIdxWhere );
/* The following assert() is not a requirement, merely an observation:
** The OR-optimization doesn't work for the right hand table of
** a LEFT JOIN: */
assert( (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0 );
}
/* Record the instruction used to terminate the loop. */
if( pLoop->wsFlags & WHERE_ONEROW ){
pLevel->op = OP_Noop;
}else if( bRev ){
pLevel->op = OP_Prev;
}else{
pLevel->op = OP_Next;
}
pLevel->p1 = iIdxCur;
pLevel->p3 = (pLoop->wsFlags&WHERE_UNQ_WANTED)!=0 ? 1:0;
if( (pLoop->wsFlags & WHERE_CONSTRAINT)==0 ){
pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP;
}else{
assert( pLevel->p5==0 );
}
if( omitTable ) pIdx = 0;
}else
#ifndef SQLITE_OMIT_OR_OPTIMIZATION
if( pLoop->wsFlags & WHERE_MULTI_OR ){
/* Case 5: Two or more separately indexed terms connected by OR
**
** Example:
**
** CREATE TABLE t1(a,b,c,d);
** CREATE INDEX i1 ON t1(a);
** CREATE INDEX i2 ON t1(b);
** CREATE INDEX i3 ON t1(c);
**
** SELECT * FROM t1 WHERE a=5 OR b=7 OR (c=11 AND d=13)
**
** In the example, there are three indexed terms connected by OR.
** The top of the loop looks like this:
**
** Null 1 # Zero the rowset in reg 1
**
** Then, for each indexed term, the following. The arguments to
** RowSetTest are such that the rowid of the current row is inserted
** into the RowSet. If it is already present, control skips the
** Gosub opcode and jumps straight to the code generated by WhereEnd().
**
** sqlite3WhereBegin(<term>)
** RowSetTest # Insert rowid into rowset
** Gosub 2 A
** sqlite3WhereEnd()
**
** Following the above, code to terminate the loop. Label A, the target
** of the Gosub above, jumps to the instruction right after the Goto.
**
** Null 1 # Zero the rowset in reg 1
** Goto B # The loop is finished.
**
** A: <loop body> # Return data, whatever.
**
** Return 2 # Jump back to the Gosub
**
** B: <after the loop>
**
** Added 2014-05-26: If the table is a WITHOUT ROWID table, then
** use an ephemeral index instead of a RowSet to record the primary
** keys of the rows we have already seen.
**
*/
WhereClause *pOrWc; /* The OR-clause broken out into subterms */
SrcList *pOrTab; /* Shortened table list or OR-clause generation */
Index *pCov = 0; /* Potential covering index (or NULL) */
int iCovCur = pParse->nTab++; /* Cursor used for index scans (if any) */
int regReturn = ++pParse->nMem; /* Register used with OP_Gosub */
int regRowset = 0; /* Register for RowSet object */
int regRowid = 0; /* Register holding rowid */
int iLoopBody = sqlite3VdbeMakeLabel(pParse);/* Start of loop body */
int iRetInit; /* Address of regReturn init */
int untestedTerms = 0; /* Some terms not completely tested */
int ii; /* Loop counter */
u16 wctrlFlags; /* Flags for sub-WHERE clause */
Expr *pAndExpr = 0; /* An ".. AND (...)" expression */
Table *pTab = pTabItem->pTab;
pTerm = pLoop->aLTerm[0];
assert( pTerm!=0 );
assert( pTerm->eOperator & WO_OR );
assert( (pTerm->wtFlags & TERM_ORINFO)!=0 );
pOrWc = &pTerm->u.pOrInfo->wc;
pLevel->op = OP_Return;
pLevel->p1 = regReturn;
/* Set up a new SrcList in pOrTab containing the table being scanned
** by this loop in the a[0] slot and all notReady tables in a[1..] slots.
** This becomes the SrcList in the recursive call to sqlite3WhereBegin().
*/
if( pWInfo->nLevel>1 ){
int nNotReady; /* The number of notReady tables */
struct SrcList_item *origSrc; /* Original list of tables */
nNotReady = pWInfo->nLevel - iLevel - 1;
pOrTab = sqlite3StackAllocRaw(db,
sizeof(*pOrTab)+ nNotReady*sizeof(pOrTab->a[0]));
if( pOrTab==0 ) return notReady;
pOrTab->nAlloc = (u8)(nNotReady + 1);
pOrTab->nSrc = pOrTab->nAlloc;
memcpy(pOrTab->a, pTabItem, sizeof(*pTabItem));
origSrc = pWInfo->pTabList->a;
for(k=1; k<=nNotReady; k++){
memcpy(&pOrTab->a[k], &origSrc[pLevel[k].iFrom], sizeof(pOrTab->a[k]));
}
}else{
pOrTab = pWInfo->pTabList;
}
/* Initialize the rowset register to contain NULL. An SQL NULL is
** equivalent to an empty rowset. Or, create an ephemeral index
** capable of holding primary keys in the case of a WITHOUT ROWID.
**
** Also initialize regReturn to contain the address of the instruction
** immediately following the OP_Return at the bottom of the loop. This
** is required in a few obscure LEFT JOIN cases where control jumps
** over the top of the loop into the body of it. In this case the
** correct response for the end-of-loop code (the OP_Return) is to
** fall through to the next instruction, just as an OP_Next does if
** called on an uninitialized cursor.
*/
if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){
if( HasRowid(pTab) ){
regRowset = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_Null, 0, regRowset);
}else{
Index *pPk = sqlite3PrimaryKeyIndex(pTab);
regRowset = pParse->nTab++;
sqlite3VdbeAddOp2(v, OP_OpenEphemeral, regRowset, pPk->nKeyCol);
sqlite3VdbeSetP4KeyInfo(pParse, pPk);
}
regRowid = ++pParse->nMem;
}
iRetInit = sqlite3VdbeAddOp2(v, OP_Integer, 0, regReturn);
/* If the original WHERE clause is z of the form: (x1 OR x2 OR ...) AND y
** Then for every term xN, evaluate as the subexpression: xN AND z
** That way, terms in y that are factored into the disjunction will
** be picked up by the recursive calls to sqlite3WhereBegin() below.
**
** Actually, each subexpression is converted to "xN AND w" where w is
** the "interesting" terms of z - terms that did not originate in the
** ON or USING clause of a LEFT JOIN, and terms that are usable as
** indices.
**
** This optimization also only applies if the (x1 OR x2 OR ...) term
** is not contained in the ON clause of a LEFT JOIN.
** See ticket http://www.sqlite.org/src/info/f2369304e4
*/
if( pWC->nTerm>1 ){
int iTerm;
for(iTerm=0; iTerm<pWC->nTerm; iTerm++){
Expr *pExpr = pWC->a[iTerm].pExpr;
if( &pWC->a[iTerm] == pTerm ) continue;
testcase( pWC->a[iTerm].wtFlags & TERM_VIRTUAL );
testcase( pWC->a[iTerm].wtFlags & TERM_CODED );
if( (pWC->a[iTerm].wtFlags & (TERM_VIRTUAL|TERM_CODED))!=0 ) continue;
if( (pWC->a[iTerm].eOperator & WO_ALL)==0 ) continue;
testcase( pWC->a[iTerm].wtFlags & TERM_ORINFO );
pExpr = sqlite3ExprDup(db, pExpr, 0);
pAndExpr = sqlite3ExprAnd(pParse, pAndExpr, pExpr);
}
if( pAndExpr ){
/* The extra 0x10000 bit on the opcode is masked off and does not
** become part of the new Expr.op. However, it does make the
** op==TK_AND comparison inside of sqlite3PExpr() false, and this
** prevents sqlite3PExpr() from implementing AND short-circuit
** optimization, which we do not want here. */
pAndExpr = sqlite3PExpr(pParse, TK_AND|0x10000, 0, pAndExpr);
}
}
/* Run a separate WHERE clause for each term of the OR clause. After
** eliminating duplicates from other WHERE clauses, the action for each
** sub-WHERE clause is to to invoke the main loop body as a subroutine.
*/
wctrlFlags = WHERE_OR_SUBCLAUSE | (pWInfo->wctrlFlags & WHERE_SEEK_TABLE);
ExplainQueryPlan((pParse, 1, "MULTI-INDEX OR"));
for(ii=0; ii<pOrWc->nTerm; ii++){
WhereTerm *pOrTerm = &pOrWc->a[ii];
if( pOrTerm->leftCursor==iCur || (pOrTerm->eOperator & WO_AND)!=0 ){
WhereInfo *pSubWInfo; /* Info for single OR-term scan */
Expr *pOrExpr = pOrTerm->pExpr; /* Current OR clause term */
int jmp1 = 0; /* Address of jump operation */
assert( (pTabItem[0].fg.jointype & JT_LEFT)==0
|| ExprHasProperty(pOrExpr, EP_FromJoin)
);
if( pAndExpr ){
pAndExpr->pLeft = pOrExpr;
pOrExpr = pAndExpr;
}
/* Loop through table entries that match term pOrTerm. */
ExplainQueryPlan((pParse, 1, "INDEX %d", ii+1));
WHERETRACE(0xffff, ("Subplan for OR-clause:\n"));
pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0,
wctrlFlags, iCovCur);
assert( pSubWInfo || pParse->nErr || db->mallocFailed );
if( pSubWInfo ){
WhereLoop *pSubLoop;
int addrExplain = sqlite3WhereExplainOneScan(
pParse, pOrTab, &pSubWInfo->a[0], 0
);
sqlite3WhereAddScanStatus(v, pOrTab, &pSubWInfo->a[0], addrExplain);
/* This is the sub-WHERE clause body. First skip over
** duplicate rows from prior sub-WHERE clauses, and record the
** rowid (or PRIMARY KEY) for the current row so that the same
** row will be skipped in subsequent sub-WHERE clauses.
*/
if( (pWInfo->wctrlFlags & WHERE_DUPLICATES_OK)==0 ){
int iSet = ((ii==pOrWc->nTerm-1)?-1:ii);
if( HasRowid(pTab) ){
sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, -1, regRowid);
jmp1 = sqlite3VdbeAddOp4Int(v, OP_RowSetTest, regRowset, 0,
regRowid, iSet);
VdbeCoverage(v);
}else{
Index *pPk = sqlite3PrimaryKeyIndex(pTab);
int nPk = pPk->nKeyCol;
int iPk;
int r;
/* Read the PK into an array of temp registers. */
r = sqlite3GetTempRange(pParse, nPk);
for(iPk=0; iPk<nPk; iPk++){
int iCol = pPk->aiColumn[iPk];
sqlite3ExprCodeGetColumnOfTable(v, pTab, iCur, iCol,r+iPk);
}
/* Check if the temp table already contains this key. If so,
** the row has already been included in the result set and
** can be ignored (by jumping past the Gosub below). Otherwise,
** insert the key into the temp table and proceed with processing
** the row.
**
** Use some of the same optimizations as OP_RowSetTest: If iSet
** is zero, assume that the key cannot already be present in
** the temp table. And if iSet is -1, assume that there is no
** need to insert the key into the temp table, as it will never
** be tested for. */
if( iSet ){
jmp1 = sqlite3VdbeAddOp4Int(v, OP_Found, regRowset, 0, r, nPk);
VdbeCoverage(v);
}
if( iSet>=0 ){
sqlite3VdbeAddOp3(v, OP_MakeRecord, r, nPk, regRowid);
sqlite3VdbeAddOp4Int(v, OP_IdxInsert, regRowset, regRowid,
r, nPk);
if( iSet ) sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
}
/* Release the array of temp registers */
sqlite3ReleaseTempRange(pParse, r, nPk);
}
}
/* Invoke the main loop body as a subroutine */
sqlite3VdbeAddOp2(v, OP_Gosub, regReturn, iLoopBody);
/* Jump here (skipping the main loop body subroutine) if the
** current sub-WHERE row is a duplicate from prior sub-WHEREs. */
if( jmp1 ) sqlite3VdbeJumpHere(v, jmp1);
/* The pSubWInfo->untestedTerms flag means that this OR term
** contained one or more AND term from a notReady table. The
** terms from the notReady table could not be tested and will
** need to be tested later.
*/
if( pSubWInfo->untestedTerms ) untestedTerms = 1;
/* If all of the OR-connected terms are optimized using the same
** index, and the index is opened using the same cursor number
** by each call to sqlite3WhereBegin() made by this loop, it may
** be possible to use that index as a covering index.
**
** If the call to sqlite3WhereBegin() above resulted in a scan that
** uses an index, and this is either the first OR-connected term
** processed or the index is the same as that used by all previous
** terms, set pCov to the candidate covering index. Otherwise, set
** pCov to NULL to indicate that no candidate covering index will
** be available.
*/
pSubLoop = pSubWInfo->a[0].pWLoop;
assert( (pSubLoop->wsFlags & WHERE_AUTO_INDEX)==0 );
if( (pSubLoop->wsFlags & WHERE_INDEXED)!=0
&& (ii==0 || pSubLoop->u.btree.pIndex==pCov)
&& (HasRowid(pTab) || !IsPrimaryKeyIndex(pSubLoop->u.btree.pIndex))
){
assert( pSubWInfo->a[0].iIdxCur==iCovCur );
pCov = pSubLoop->u.btree.pIndex;
}else{
pCov = 0;
}
/* Finish the loop through table entries that match term pOrTerm. */
sqlite3WhereEnd(pSubWInfo);
ExplainQueryPlanPop(pParse);
}
}
}
ExplainQueryPlanPop(pParse);
pLevel->u.pCovidx = pCov;
if( pCov ) pLevel->iIdxCur = iCovCur;
if( pAndExpr ){
pAndExpr->pLeft = 0;
sqlite3ExprDelete(db, pAndExpr);
}
sqlite3VdbeChangeP1(v, iRetInit, sqlite3VdbeCurrentAddr(v));
sqlite3VdbeGoto(v, pLevel->addrBrk);
sqlite3VdbeResolveLabel(v, iLoopBody);
if( pWInfo->nLevel>1 ){ sqlite3StackFree(db, pOrTab); }
if( !untestedTerms ) disableTerm(pLevel, pTerm);
}else
#endif /* SQLITE_OMIT_OR_OPTIMIZATION */
{
/* Case 6: There is no usable index. We must do a complete
** scan of the entire table.
*/
static const u8 aStep[] = { OP_Next, OP_Prev };
static const u8 aStart[] = { OP_Rewind, OP_Last };
assert( bRev==0 || bRev==1 );
if( pTabItem->fg.isRecursive ){
/* Tables marked isRecursive have only a single row that is stored in
** a pseudo-cursor. No need to Rewind or Next such cursors. */
pLevel->op = OP_Noop;
}else{
codeCursorHint(pTabItem, pWInfo, pLevel, 0);
pLevel->op = aStep[bRev];
pLevel->p1 = iCur;
pLevel->p2 = 1 + sqlite3VdbeAddOp2(v, aStart[bRev], iCur, addrHalt);
VdbeCoverageIf(v, bRev==0);
VdbeCoverageIf(v, bRev!=0);
pLevel->p5 = SQLITE_STMTSTATUS_FULLSCAN_STEP;
}
}
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
pLevel->addrVisit = sqlite3VdbeCurrentAddr(v);
#endif
/* Insert code to test every subexpression that can be completely
** computed using the current set of tables.
**
** This loop may run between one and three times, depending on the
** constraints to be generated. The value of stack variable iLoop
** determines the constraints coded by each iteration, as follows:
**
** iLoop==1: Code only expressions that are entirely covered by pIdx.
** iLoop==2: Code remaining expressions that do not contain correlated
** sub-queries.
** iLoop==3: Code all remaining expressions.
**
** An effort is made to skip unnecessary iterations of the loop.
*/
iLoop = (pIdx ? 1 : 2);
do{
int iNext = 0; /* Next value for iLoop */
for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){
Expr *pE;
int skipLikeAddr = 0;
testcase( pTerm->wtFlags & TERM_VIRTUAL );
testcase( pTerm->wtFlags & TERM_CODED );
if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
if( (pTerm->prereqAll & pLevel->notReady)!=0 ){
testcase( pWInfo->untestedTerms==0
&& (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)!=0 );
pWInfo->untestedTerms = 1;
continue;
}
pE = pTerm->pExpr;
assert( pE!=0 );
if( (pTabItem->fg.jointype&JT_LEFT) && !ExprHasProperty(pE,EP_FromJoin) ){
continue;
}
if( iLoop==1 && !sqlite3ExprCoveredByIndex(pE, pLevel->iTabCur, pIdx) ){
iNext = 2;
continue;
}
if( iLoop<3 && (pTerm->wtFlags & TERM_VARSELECT) ){
if( iNext==0 ) iNext = 3;
continue;
}
if( (pTerm->wtFlags & TERM_LIKECOND)!=0 ){
/* If the TERM_LIKECOND flag is set, that means that the range search
** is sufficient to guarantee that the LIKE operator is true, so we
** can skip the call to the like(A,B) function. But this only works
** for strings. So do not skip the call to the function on the pass
** that compares BLOBs. */
#ifdef SQLITE_LIKE_DOESNT_MATCH_BLOBS
continue;
#else
u32 x = pLevel->iLikeRepCntr;
if( x>0 ){
skipLikeAddr = sqlite3VdbeAddOp1(v, (x&1)?OP_IfNot:OP_If,(int)(x>>1));
VdbeCoverageIf(v, (x&1)==1);
VdbeCoverageIf(v, (x&1)==0);
}
#endif
}
#ifdef WHERETRACE_ENABLED /* 0xffff */
if( sqlite3WhereTrace ){
VdbeNoopComment((v, "WhereTerm[%d] (%p) priority=%d",
pWC->nTerm-j, pTerm, iLoop));
}
#endif
sqlite3ExprIfFalse(pParse, pE, addrCont, SQLITE_JUMPIFNULL);
if( skipLikeAddr ) sqlite3VdbeJumpHere(v, skipLikeAddr);
pTerm->wtFlags |= TERM_CODED;
}
iLoop = iNext;
}while( iLoop>0 );
/* Insert code to test for implied constraints based on transitivity
** of the "==" operator.
**
** Example: If the WHERE clause contains "t1.a=t2.b" and "t2.b=123"
** and we are coding the t1 loop and the t2 loop has not yet coded,
** then we cannot use the "t1.a=t2.b" constraint, but we can code
** the implied "t1.a=123" constraint.
*/
for(pTerm=pWC->a, j=pWC->nTerm; j>0; j--, pTerm++){
Expr *pE, sEAlt;
WhereTerm *pAlt;
if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
if( (pTerm->eOperator & (WO_EQ|WO_IS))==0 ) continue;
if( (pTerm->eOperator & WO_EQUIV)==0 ) continue;
if( pTerm->leftCursor!=iCur ) continue;
if( pLevel->iLeftJoin ) continue;
pE = pTerm->pExpr;
assert( !ExprHasProperty(pE, EP_FromJoin) );
assert( (pTerm->prereqRight & pLevel->notReady)!=0 );
pAlt = sqlite3WhereFindTerm(pWC, iCur, pTerm->u.leftColumn, notReady,
WO_EQ|WO_IN|WO_IS, 0);
if( pAlt==0 ) continue;
if( pAlt->wtFlags & (TERM_CODED) ) continue;
if( (pAlt->eOperator & WO_IN)
&& (pAlt->pExpr->flags & EP_xIsSelect)
&& (pAlt->pExpr->x.pSelect->pEList->nExpr>1)
){
continue;
}
testcase( pAlt->eOperator & WO_EQ );
testcase( pAlt->eOperator & WO_IS );
testcase( pAlt->eOperator & WO_IN );
VdbeModuleComment((v, "begin transitive constraint"));
sEAlt = *pAlt->pExpr;
sEAlt.pLeft = pE->pLeft;
sqlite3ExprIfFalse(pParse, &sEAlt, addrCont, SQLITE_JUMPIFNULL);
}
/* For a LEFT OUTER JOIN, generate code that will record the fact that
** at least one row of the right table has matched the left table.
*/
if( pLevel->iLeftJoin ){
pLevel->addrFirst = sqlite3VdbeCurrentAddr(v);
sqlite3VdbeAddOp2(v, OP_Integer, 1, pLevel->iLeftJoin);
VdbeComment((v, "record LEFT JOIN hit"));
for(pTerm=pWC->a, j=0; j<pWC->nTerm; j++, pTerm++){
testcase( pTerm->wtFlags & TERM_VIRTUAL );
testcase( pTerm->wtFlags & TERM_CODED );
if( pTerm->wtFlags & (TERM_VIRTUAL|TERM_CODED) ) continue;
if( (pTerm->prereqAll & pLevel->notReady)!=0 ){
assert( pWInfo->untestedTerms );
continue;
}
assert( pTerm->pExpr );
sqlite3ExprIfFalse(pParse, pTerm->pExpr, addrCont, SQLITE_JUMPIFNULL);
pTerm->wtFlags |= TERM_CODED;
}
}
return pLevel->notReady;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_1275_3 |
crossvul-cpp_data_bad_5217_1 | /*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* The flush SMB is sent to ensure all data and allocation information
* for the corresponding file has been written to stable storage. This
* is a synchronous request. The response should not be sent until the
* writes are complete.
*
* The SmbFlush request is described in CIFS/1.0 1996 Section 3.9.14.
*
* CIFS/1.0 June 13, 1996
* Heizer, et al
* draft-heizer-cifs-v1-spec-00.txt
*/
#include <smbsrv/smb_kproto.h>
#include <smbsrv/smb_fsops.h>
static void smb_flush_file(struct smb_request *sr, struct smb_ofile *ofile);
/*
* smb_com_flush
*
* Flush any cached data for a specified file, or for all files that
* this client has open, to stable storage. If the fid is valid (i.e.
* not 0xFFFF), we flush only that file. Otherwise we flush all files
* associated with this client.
*
* We need to protect the list because there's a good chance we'll
* block during the flush operation.
*/
/*
 * smb_pre_flush
 *
 * Pre-op handler for SmbFlush: decode the fid word from the request
 * and fire the DTrace start probe.  Decode failure maps to SDRC_ERROR.
 */
smb_sdrc_t
smb_pre_flush(smb_request_t *sr)
{
	int rc = smbsr_decode_vwv(sr, "w", &sr->smb_fid);

	DTRACE_SMB_1(op__Flush__start, smb_request_t *, sr);

	return ((rc == 0) ? SDRC_SUCCESS : SDRC_ERROR);
}
/*
 * smb_post_flush
 *
 * Post-op handler for SmbFlush: fires the DTrace done probe only.
 */
void
smb_post_flush(smb_request_t *sr)
{
	DTRACE_SMB_1(op__Flush__done, smb_request_t *, sr);
}
/*
 * smb_com_flush
 *
 * SmbFlush worker.  If the server policy (smb_flush_required) says no
 * explicit commit is needed, just return an empty response.  Otherwise:
 * a specific fid flushes only that file; the wildcard fid 0xffff walks
 * the tree's ofile list and flushes every open file on the tree.
 *
 * NOTE(review): the wildcard path holds the ofile list as RW_READER and
 * each ofile's f_mutex across smb_flush_file(), which may block on I/O;
 * presumably safe per the "protect the list" comment above -- confirm.
 */
smb_sdrc_t
smb_com_flush(smb_request_t *sr)
{
	smb_ofile_t *file;
	smb_llist_t *flist;
	int rc;

	/* Flushing disabled by configuration: acknowledge immediately. */
	if (smb_flush_required == 0) {
		rc = smbsr_encode_empty_result(sr);
		return ((rc == 0) ? SDRC_SUCCESS : SDRC_ERROR);
	}

	if (sr->smb_fid != 0xffff) {
		/* Single-file flush: resolve the fid to an ofile. */
		smbsr_lookup_file(sr);
		if (sr->fid_ofile == NULL) {
			smbsr_error(sr, NT_STATUS_INVALID_HANDLE,
			    ERRDOS, ERRbadfid);
			return (SDRC_ERROR);
		}

		smb_flush_file(sr, sr->fid_ofile);
	} else {
		/* Wildcard fid: flush every ofile open on this tree. */
		flist = &sr->tid_tree->t_ofile_list;
		smb_llist_enter(flist, RW_READER);
		file = smb_llist_head(flist);
		while (file) {
			mutex_enter(&file->f_mutex);
			smb_flush_file(sr, file);
			mutex_exit(&file->f_mutex);
			file = smb_llist_next(flist, file);
		}
		smb_llist_exit(flist);
	}

	rc = smbsr_encode_empty_result(sr);
	return ((rc == 0) ? SDRC_SUCCESS : SDRC_ERROR);
}
/*
* smb_flush_file
*
* If writes on this file are not synchronous, flush it using the NFSv3
* commit interface.
*/
static void
smb_flush_file(struct smb_request *sr, struct smb_ofile *ofile)
{
sr->user_cr = smb_ofile_getcred(ofile);
if ((ofile->f_node->flags & NODE_FLAGS_WRITE_THROUGH) == 0)
(void) smb_fsop_commit(sr, sr->user_cr, ofile->f_node);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_5217_1 |
crossvul-cpp_data_good_2200_0 | /*
* Copyright (C) 2006,2008 by the Massachusetts Institute of Technology.
* All rights reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
/*
* Copyright 2004 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*
* A module that implements the spnego security mechanism.
* It is used to negotiate the security mechanism between
* peers using the GSS-API. SPNEGO is specified in RFC 4178.
*
*/
/*
* Copyright (c) 2006-2008, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/* #pragma ident "@(#)spnego_mech.c 1.7 04/09/28 SMI" */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <k5-int.h>
#include <krb5.h>
#include <mglueP.h>
#include "gssapiP_spnego.h"
#include <gssapi_err_generic.h>
#undef g_token_size
#undef g_verify_token_header
#undef g_make_token_header
#define HARD_ERROR(v) ((v) != GSS_S_COMPLETE && (v) != GSS_S_CONTINUE_NEEDED)
typedef const gss_OID_desc *gss_OID_const;
/* der routines defined in libgss */
extern unsigned int gssint_der_length_size(unsigned int);
extern int gssint_get_der_length(unsigned char **, unsigned int,
unsigned int*);
extern int gssint_put_der_length(unsigned int, unsigned char **, unsigned int);
/* private routines for spnego_mechanism */
static spnego_token_t make_spnego_token(const char *);
static gss_buffer_desc make_err_msg(const char *);
static int g_token_size(gss_OID_const, unsigned int);
static int g_make_token_header(gss_OID_const, unsigned int,
unsigned char **, unsigned int);
static int g_verify_token_header(gss_OID_const, unsigned int *,
unsigned char **,
int, unsigned int);
static int g_verify_neg_token_init(unsigned char **, unsigned int);
static gss_OID get_mech_oid(OM_uint32 *, unsigned char **, size_t);
static gss_buffer_t get_input_token(unsigned char **, unsigned int);
static gss_OID_set get_mech_set(OM_uint32 *, unsigned char **, unsigned int);
static OM_uint32 get_req_flags(unsigned char **, OM_uint32, OM_uint32 *);
static OM_uint32 get_available_mechs(OM_uint32 *, gss_name_t, gss_cred_usage_t,
gss_const_key_value_set_t,
gss_cred_id_t *, gss_OID_set *);
static OM_uint32 get_negotiable_mechs(OM_uint32 *, spnego_gss_cred_id_t,
gss_cred_usage_t, gss_OID_set *);
static void release_spnego_ctx(spnego_gss_ctx_id_t *);
static void check_spnego_options(spnego_gss_ctx_id_t);
static spnego_gss_ctx_id_t create_spnego_ctx(void);
static int put_mech_set(gss_OID_set mechSet, gss_buffer_t buf);
static int put_input_token(unsigned char **, gss_buffer_t, unsigned int);
static int put_mech_oid(unsigned char **, gss_OID_const, unsigned int);
static int put_negResult(unsigned char **, OM_uint32, unsigned int);
static OM_uint32
process_mic(OM_uint32 *, gss_buffer_t, spnego_gss_ctx_id_t,
gss_buffer_t *, OM_uint32 *, send_token_flag *);
static OM_uint32
handle_mic(OM_uint32 *, gss_buffer_t, int, spnego_gss_ctx_id_t,
gss_buffer_t *, OM_uint32 *, send_token_flag *);
static OM_uint32
init_ctx_new(OM_uint32 *, spnego_gss_cred_id_t, gss_ctx_id_t *,
send_token_flag *);
static OM_uint32
init_ctx_nego(OM_uint32 *, spnego_gss_ctx_id_t, OM_uint32, gss_OID,
gss_buffer_t *, gss_buffer_t *,
OM_uint32 *, send_token_flag *);
static OM_uint32
init_ctx_cont(OM_uint32 *, gss_ctx_id_t *, gss_buffer_t,
gss_buffer_t *, gss_buffer_t *,
OM_uint32 *, send_token_flag *);
static OM_uint32
init_ctx_reselect(OM_uint32 *, spnego_gss_ctx_id_t, OM_uint32,
gss_OID, gss_buffer_t *, gss_buffer_t *,
OM_uint32 *, send_token_flag *);
static OM_uint32
init_ctx_call_init(OM_uint32 *, spnego_gss_ctx_id_t, spnego_gss_cred_id_t,
gss_name_t, OM_uint32, OM_uint32, gss_buffer_t,
gss_OID *, gss_buffer_t, OM_uint32 *, OM_uint32 *,
OM_uint32 *, send_token_flag *);
static OM_uint32
acc_ctx_new(OM_uint32 *, gss_buffer_t, gss_ctx_id_t *,
spnego_gss_cred_id_t, gss_buffer_t *,
gss_buffer_t *, OM_uint32 *, send_token_flag *);
static OM_uint32
acc_ctx_cont(OM_uint32 *, gss_buffer_t, gss_ctx_id_t *,
gss_buffer_t *, gss_buffer_t *,
OM_uint32 *, send_token_flag *);
static OM_uint32
acc_ctx_vfy_oid(OM_uint32 *, spnego_gss_ctx_id_t, gss_OID,
OM_uint32 *, send_token_flag *);
static OM_uint32
acc_ctx_call_acc(OM_uint32 *, spnego_gss_ctx_id_t, spnego_gss_cred_id_t,
gss_buffer_t, gss_OID *, gss_buffer_t,
OM_uint32 *, OM_uint32 *, gss_cred_id_t *,
OM_uint32 *, send_token_flag *);
static gss_OID
negotiate_mech(gss_OID_set, gss_OID_set, OM_uint32 *);
static int
g_get_tag_and_length(unsigned char **, int, unsigned int, unsigned int *);
static int
make_spnego_tokenInit_msg(spnego_gss_ctx_id_t,
int,
gss_buffer_t,
OM_uint32, gss_buffer_t, send_token_flag,
gss_buffer_t);
static int
make_spnego_tokenTarg_msg(OM_uint32, gss_OID, gss_buffer_t,
gss_buffer_t, send_token_flag,
gss_buffer_t);
static OM_uint32
get_negTokenInit(OM_uint32 *, gss_buffer_t, gss_buffer_t,
gss_OID_set *, OM_uint32 *, gss_buffer_t *,
gss_buffer_t *);
static OM_uint32
get_negTokenResp(OM_uint32 *, unsigned char *, unsigned int,
OM_uint32 *, gss_OID *, gss_buffer_t *, gss_buffer_t *);
static int
is_kerb_mech(gss_OID oid);
/*
 * SPNEGO mechanism OID (1.3.6.1.5.5.2) and the singleton OID set that
 * wraps it, exported through the usual gss_mech_* symbols.
 */
static const gss_OID_desc spnego_oids[] = {
	{SPNEGO_OID_LENGTH, SPNEGO_OID},
};

/* Public handle to the SPNEGO mech OID. */
const gss_OID_desc * const gss_mech_spnego = spnego_oids+0;

static const gss_OID_set_desc spnego_oidsets[] = {
	{1, (gss_OID) spnego_oids+0},
};

/* Public handle to the one-element OID set containing SPNEGO. */
const gss_OID_set_desc * const gss_mech_set_spnego = spnego_oidsets+0;
static int make_NegHints(OM_uint32 *, spnego_gss_cred_id_t, gss_buffer_t *);
static int put_neg_hints(unsigned char **, gss_buffer_t, unsigned int);
static OM_uint32
acc_ctx_hints(OM_uint32 *, gss_ctx_id_t *, spnego_gss_cred_id_t,
gss_buffer_t *, OM_uint32 *, send_token_flag *);
/*
* The Mech OID for SPNEGO:
* { iso(1) org(3) dod(6) internet(1) security(5)
* mechanism(5) spnego(2) }
*/
/*
 * Mechanism dispatch table: the mechglue calls SPNEGO through these
 * entry points.  NULL slots are operations this mechanism does not
 * implement; the trailing comments name the slot they fill.
 */
static struct gss_config spnego_mechanism =
{
	{SPNEGO_OID_LENGTH, SPNEGO_OID},
	NULL,
	spnego_gss_acquire_cred,
	spnego_gss_release_cred,
	spnego_gss_init_sec_context,
#ifndef LEAN_CLIENT
	spnego_gss_accept_sec_context,
#else
	NULL,
#endif  /* LEAN_CLIENT */
	NULL,				/* gss_process_context_token */
	spnego_gss_delete_sec_context,	/* gss_delete_sec_context */
	spnego_gss_context_time,	/* gss_context_time */
	spnego_gss_get_mic,		/* gss_get_mic */
	spnego_gss_verify_mic,		/* gss_verify_mic */
	spnego_gss_wrap,		/* gss_wrap */
	spnego_gss_unwrap,		/* gss_unwrap */
	spnego_gss_display_status,
	NULL,				/* gss_indicate_mechs */
	spnego_gss_compare_name,
	spnego_gss_display_name,
	spnego_gss_import_name,
	spnego_gss_release_name,
	spnego_gss_inquire_cred,	/* gss_inquire_cred */
	NULL,				/* gss_add_cred */
#ifndef LEAN_CLIENT
	spnego_gss_export_sec_context,	/* gss_export_sec_context */
	spnego_gss_import_sec_context,	/* gss_import_sec_context */
#else
	NULL,				/* gss_export_sec_context */
	NULL,				/* gss_import_sec_context */
#endif  /* LEAN_CLIENT */
	NULL,				/* gss_inquire_cred_by_mech */
	spnego_gss_inquire_names_for_mech,
	spnego_gss_inquire_context,	/* gss_inquire_context */
	NULL,				/* gss_internal_release_oid */
	spnego_gss_wrap_size_limit,	/* gss_wrap_size_limit */
	NULL,				/* gssd_pname_to_uid */
	NULL,				/* gss_userok */
	NULL,				/* gss_export_name */
	spnego_gss_duplicate_name,	/* gss_duplicate_name */
	NULL,				/* gss_store_cred */
	spnego_gss_inquire_sec_context_by_oid, /* gss_inquire_sec_context_by_oid */
	spnego_gss_inquire_cred_by_oid,	/* gss_inquire_cred_by_oid */
	spnego_gss_set_sec_context_option, /* gss_set_sec_context_option */
	spnego_gss_set_cred_option,	/* gssspi_set_cred_option */
	NULL,				/* gssspi_mech_invoke */
	spnego_gss_wrap_aead,
	spnego_gss_unwrap_aead,
	spnego_gss_wrap_iov,
	spnego_gss_unwrap_iov,
	spnego_gss_wrap_iov_length,
	spnego_gss_complete_auth_token,
	spnego_gss_acquire_cred_impersonate_name,
	NULL,				/* gss_add_cred_impersonate_name */
	spnego_gss_display_name_ext,
	spnego_gss_inquire_name,
	spnego_gss_get_name_attribute,
	spnego_gss_set_name_attribute,
	spnego_gss_delete_name_attribute,
	spnego_gss_export_name_composite,
	spnego_gss_map_name_to_any,
	spnego_gss_release_any_name_mapping,
	spnego_gss_pseudo_random,
	spnego_gss_set_neg_mechs,
	spnego_gss_inquire_saslname_for_mech,
	spnego_gss_inquire_mech_for_saslname,
	spnego_gss_inquire_attrs_for_mech,
	spnego_gss_acquire_cred_from,
	NULL,				/* gss_store_cred_into */
	spnego_gss_acquire_cred_with_password,
	spnego_gss_export_cred,
	spnego_gss_import_cred,
	NULL,				/* gssspi_import_sec_context_by_mech */
	NULL,				/* gssspi_import_name_by_mech */
	NULL,				/* gssspi_import_cred_by_mech */
	spnego_gss_get_mic_iov,
	spnego_gss_verify_mic_iov,
	spnego_gss_get_mic_iov_length
};
#ifdef _GSS_STATIC_LINK
#include "mglueP.h"
static int gss_spnegomechglue_init(void)
{
struct gss_mech_config mech_spnego;
memset(&mech_spnego, 0, sizeof(mech_spnego));
mech_spnego.mech = &spnego_mechanism;
mech_spnego.mechNameStr = "spnego";
mech_spnego.mech_type = GSS_C_NO_OID;
return gssint_register_mechinfo(&mech_spnego);
}
#else
/*
 * Dynamic-load entry point: hand the mechglue our dispatch table.
 */
gss_mechanism KRB5_CALLCONV
gss_mech_initialize(void)
{
	return (&spnego_mechanism);
}
MAKE_INIT_FUNCTION(gss_krb5int_lib_init);
MAKE_FINI_FUNCTION(gss_krb5int_lib_fini);
int gss_krb5int_lib_init(void);
#endif /* _GSS_STATIC_LINK */
/*
 * Library initialization: register the per-thread status key and, when
 * statically linked, register the mechanism with the mechglue.
 * Returns 0 on success or the first failing registration's error code.
 */
int gss_spnegoint_lib_init(void)
{
	int st = k5_key_register(K5_KEY_GSS_SPNEGO_STATUS, NULL);

	if (st != 0)
		return st;

#ifdef _GSS_STATIC_LINK
	/* Static build: hook ourselves into the mechglue now. */
	return gss_spnegomechglue_init();
#else
	return 0;
#endif
}
/* Library finalization hook; SPNEGO has nothing to tear down. */
void gss_spnegoint_lib_fini(void)
{
}
/*ARGSUSED*/
/*
 * spnego_gss_acquire_cred
 *
 * Thin forwarder: acquire SPNEGO credentials with no credential store,
 * delegating all work to spnego_gss_acquire_cred_from().
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_acquire_cred(OM_uint32 *minor_status,
			gss_name_t desired_name,
			OM_uint32 time_req,
			gss_OID_set desired_mechs,
			gss_cred_usage_t cred_usage,
			gss_cred_id_t *output_cred_handle,
			gss_OID_set *actual_mechs,
			OM_uint32 *time_rec)
{
	return spnego_gss_acquire_cred_from(minor_status, desired_name, time_req,
					    desired_mechs, cred_usage, NULL,
					    output_cred_handle, actual_mechs,
					    time_rec);
}
/*ARGSUSED*/
/*
 * spnego_gss_acquire_cred_from
 *
 * Acquire a mechglue credential covering all locally available mechs
 * and wrap it in a spnego_gss_cred_id_rec.  On success the wrapper is
 * returned through *output_cred_handle (caller releases it with
 * spnego_gss_release_cred); on failure *output_cred_handle is set to
 * GSS_C_NO_CREDENTIAL.  If actual_mechs is non-NULL it receives a copy
 * of the available-mech set.
 *
 * Fix: amechs was previously uninitialized.  get_available_mechs() can
 * fail before storing into it, yet amechs is read (compared against
 * GSS_C_NULL_OID_SET) and released unconditionally below -- using an
 * indeterminate pointer is undefined behavior.  Initialize it to
 * GSS_C_NULL_OID_SET so the failure path is safe.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_acquire_cred_from(OM_uint32 *minor_status,
			     const gss_name_t desired_name,
			     OM_uint32 time_req,
			     const gss_OID_set desired_mechs,
			     gss_cred_usage_t cred_usage,
			     gss_const_key_value_set_t cred_store,
			     gss_cred_id_t *output_cred_handle,
			     gss_OID_set *actual_mechs,
			     OM_uint32 *time_rec)
{
	OM_uint32 status, tmpmin;
	gss_OID_set amechs = GSS_C_NULL_OID_SET;
	gss_cred_id_t mcred = NULL;
	spnego_gss_cred_id_t spcred = NULL;
	dsyslog("Entering spnego_gss_acquire_cred\n");

	if (actual_mechs)
		*actual_mechs = NULL;

	if (time_rec)
		*time_rec = 0;

	/* We will obtain a mechglue credential and wrap it in a
	 * spnego_gss_cred_id_rec structure.  Allocate the wrapper. */
	spcred = malloc(sizeof(spnego_gss_cred_id_rec));
	if (spcred == NULL) {
		*minor_status = ENOMEM;
		return (GSS_S_FAILURE);
	}
	spcred->neg_mechs = GSS_C_NULL_OID_SET;

	/*
	 * Always use get_available_mechs to collect a list of
	 * mechs for which creds are available.
	 */
	status = get_available_mechs(minor_status, desired_name,
				     cred_usage, cred_store, &mcred,
				     &amechs);

	if (actual_mechs && amechs != GSS_C_NULL_OID_SET) {
		(void) gssint_copy_oid_set(&tmpmin, amechs, actual_mechs);
	}
	(void) gss_release_oid_set(&tmpmin, &amechs);

	if (status == GSS_S_COMPLETE) {
		spcred->mcred = mcred;
		*output_cred_handle = (gss_cred_id_t)spcred;
	} else {
		/* Failure: discard the wrapper; mcred was not acquired. */
		free(spcred);
		*output_cred_handle = GSS_C_NO_CREDENTIAL;
	}

	dsyslog("Leaving spnego_gss_acquire_cred\n");
	return (status);
}
/*ARGSUSED*/
/*
 * spnego_gss_release_cred
 *
 * Release a SPNEGO credential wrapper: free the restricted-mech set and
 * the underlying mechglue credential, then the wrapper itself.  The
 * caller's handle is cleared to GSS_C_NO_CREDENTIAL before the frees so
 * it never refers to released storage.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_release_cred(OM_uint32 *minor_status,
			gss_cred_id_t *cred_handle)
{
	spnego_gss_cred_id_t cred;

	dsyslog("Entering spnego_gss_release_cred\n");

	/* Both output parameters must be writable. */
	if (minor_status == NULL || cred_handle == NULL)
		return (GSS_S_CALL_INACCESSIBLE_WRITE);

	*minor_status = 0;

	/* Releasing the null credential is a no-op. */
	if (*cred_handle == GSS_C_NO_CREDENTIAL)
		return (GSS_S_COMPLETE);

	cred = (spnego_gss_cred_id_t)*cred_handle;
	*cred_handle = GSS_C_NO_CREDENTIAL;

	gss_release_oid_set(minor_status, &cred->neg_mechs);
	gss_release_cred(minor_status, &cred->mcred);
	free(cred);

	dsyslog("Leaving spnego_gss_release_cred\n");
	return (GSS_S_COMPLETE);
}
/*
 * Cache the mechglue's module-option string for the SPNEGO OID on the
 * context; consumed elsewhere via spnego_ctx->optionStr.
 */
static void
check_spnego_options(spnego_gss_ctx_id_t spnego_ctx)
{
	spnego_ctx->optionStr = gssint_get_modOptions(
		(const gss_OID)&spnego_oids[0]);
}
/*
 * Allocate and initialize a fresh SPNEGO negotiation context.
 * Returns NULL on allocation failure.  Note: only the fields listed
 * here are initialized; the rest of the record is set during
 * negotiation.
 */
static spnego_gss_ctx_id_t
create_spnego_ctx(void)
{
	spnego_gss_ctx_id_t ctx;

	ctx = (spnego_gss_ctx_id_t)malloc(sizeof (spnego_gss_ctx_id_rec));
	if (ctx == NULL)
		return (NULL);

	/* Identity and handles. */
	ctx->magic_num = SPNEGO_MAGIC_ID;
	ctx->ctx_handle = GSS_C_NO_CONTEXT;
	ctx->default_cred = GSS_C_NO_CREDENTIAL;
	ctx->internal_name = GSS_C_NO_NAME;

	/* Mechanism negotiation state. */
	ctx->mech_set = NULL;
	ctx->internal_mech = NULL;
	ctx->actual_mech = GSS_C_NO_OID;
	ctx->DER_mechTypes.length = 0;
	ctx->DER_mechTypes.value = NULL;

	/* MIC / progress flags, all cleared. */
	ctx->mic_reqd = 0;
	ctx->mic_sent = 0;
	ctx->mic_rcvd = 0;
	ctx->mech_complete = 0;
	ctx->nego_done = 0;

	ctx->optionStr = NULL;
	check_spnego_options(ctx);

	return (ctx);
}
/*
* Both initiator and acceptor call here to verify and/or create mechListMIC,
* and to consistency-check the MIC state. handle_mic is invoked only if the
* negotiated mech has completed and supports MICs.
*/
static OM_uint32
handle_mic(OM_uint32 *minor_status, gss_buffer_t mic_in,
	   int send_mechtok, spnego_gss_ctx_id_t sc,
	   gss_buffer_t *mic_out,
	   OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret;

	ret = GSS_S_FAILURE;
	*mic_out = GSS_C_NO_BUFFER;
	if (mic_in != GSS_C_NO_BUFFER) {
		if (sc->mic_rcvd) {
			/* Reject MIC if we've already received a MIC. */
			*negState = REJECT;
			*tokflag = ERROR_TOKEN_SEND;
			return GSS_S_DEFECTIVE_TOKEN;
		}
	} else if (sc->mic_reqd && !send_mechtok) {
		/*
		 * If the peer sends the final mechanism token, it
		 * must send the MIC with that token if the
		 * negotiation requires MICs.
		 */
		*negState = REJECT;
		*tokflag = ERROR_TOKEN_SEND;
		return GSS_S_DEFECTIVE_TOKEN;
	}
	/* Verify the incoming MIC (if any) and generate ours if needed. */
	ret = process_mic(minor_status, mic_in, sc, mic_out,
			  negState, tokflag);
	if (ret != GSS_S_COMPLETE) {
		return ret;
	}
	/* If a MIC is required, at least one direction must be done. */
	if (sc->mic_reqd) {
		assert(sc->mic_sent || sc->mic_rcvd);
	}
	if (sc->mic_sent && sc->mic_rcvd) {
		/* MIC exchange finished in both directions. */
		ret = GSS_S_COMPLETE;
		*negState = ACCEPT_COMPLETE;
		if (*mic_out == GSS_C_NO_BUFFER) {
			/*
			 * We sent a MIC on the previous pass; we
			 * shouldn't be sending a mechanism token.
			 */
			assert(!send_mechtok);
			*tokflag = NO_TOKEN_SEND;
		} else {
			*tokflag = CONT_TOKEN_SEND;
		}
	} else if (sc->mic_reqd) {
		/* MIC required but not yet exchanged both ways. */
		*negState = ACCEPT_INCOMPLETE;
		ret = GSS_S_CONTINUE_NEEDED;
	} else if (*negState == ACCEPT_COMPLETE) {
		ret = GSS_S_COMPLETE;
	} else {
		ret = GSS_S_CONTINUE_NEEDED;
	}
	return ret;
}
/*
* Perform the actual verification and/or generation of mechListMIC.
*/
static OM_uint32
process_mic(OM_uint32 *minor_status, gss_buffer_t mic_in,
	    spnego_gss_ctx_id_t sc, gss_buffer_t *mic_out,
	    OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret, tmpmin;
	gss_qop_t qop_state;
	gss_buffer_desc tmpmic = GSS_C_EMPTY_BUFFER;

	ret = GSS_S_FAILURE;
	if (mic_in != GSS_C_NO_BUFFER) {
		/* Verify the peer's MIC over our DER-encoded mech list. */
		ret = gss_verify_mic(minor_status, sc->ctx_handle,
				     &sc->DER_mechTypes,
				     mic_in, &qop_state);
		if (ret != GSS_S_COMPLETE) {
			*negState = REJECT;
			*tokflag = ERROR_TOKEN_SEND;
			return ret;
		}
		/* If we got a MIC, we must send a MIC. */
		sc->mic_reqd = 1;
		sc->mic_rcvd = 1;
	}
	if (sc->mic_reqd && !sc->mic_sent) {
		/* Generate our own MIC over the same mech-list encoding. */
		ret = gss_get_mic(minor_status, sc->ctx_handle,
				  GSS_C_QOP_DEFAULT,
				  &sc->DER_mechTypes,
				  &tmpmic);
		if (ret != GSS_S_COMPLETE) {
			gss_release_buffer(&tmpmin, &tmpmic);
			*tokflag = NO_TOKEN_SEND;
			return ret;
		}

		/* Caller owns *mic_out (malloc'd descriptor + token). */
		*mic_out = malloc(sizeof(gss_buffer_desc));
		if (*mic_out == GSS_C_NO_BUFFER) {
			gss_release_buffer(&tmpmin, &tmpmic);
			*tokflag = NO_TOKEN_SEND;
			return GSS_S_FAILURE;
		}
		**mic_out = tmpmic;
		sc->mic_sent = 1;
	}
	return GSS_S_COMPLETE;
}
/*
* Initial call to spnego_gss_init_sec_context().
*/
static OM_uint32
init_ctx_new(OM_uint32 *minor_status,
	     spnego_gss_cred_id_t spcred,
	     gss_ctx_id_t *ctx,
	     send_token_flag *tokflag)
{
	OM_uint32 ret;
	spnego_gss_ctx_id_t sc = NULL;

	sc = create_spnego_ctx();
	if (sc == NULL)
		return GSS_S_FAILURE;

	/* determine negotiation mech set */
	ret = get_negotiable_mechs(minor_status, spcred, GSS_C_INITIATE,
				   &sc->mech_set);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;

	/* Set an initial internal mech to make the first context token. */
	sc->internal_mech = &sc->mech_set->elements[0];

	/* Cache the DER encoding of the mech list for later MICs. */
	if (put_mech_set(sc->mech_set, &sc->DER_mechTypes) < 0) {
		ret = GSS_S_FAILURE;
		goto cleanup;
	}

	/*
	 * The actual context is not yet determined, set the output
	 * context handle to refer to the spnego context itself.
	 */
	sc->ctx_handle = GSS_C_NO_CONTEXT;
	*ctx = (gss_ctx_id_t)sc;
	sc = NULL;		/* ownership transferred to *ctx */
	*tokflag = INIT_TOKEN_SEND;
	ret = GSS_S_CONTINUE_NEEDED;

cleanup:
	/* No-op when sc was handed off above (sc == NULL). */
	release_spnego_ctx(&sc);
	return ret;
}
/*
* Called by second and later calls to spnego_gss_init_sec_context()
* to decode reply and update state.
*/
/*
 * Decode the acceptor's reply token and update negotiation state for the
 * second and later calls to spnego_gss_init_sec_context().
 *
 * Outputs: *responseToken and *mechListMIC receive any embedded acceptor
 * mech token / MIC (caller frees); *negState and *tokflag tell the caller
 * what kind of token, if any, to send next.
 */
static OM_uint32
init_ctx_cont(OM_uint32 *minor_status, gss_ctx_id_t *ctx, gss_buffer_t buf,
	      gss_buffer_t *responseToken, gss_buffer_t *mechListMIC,
	      OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret, tmpmin, acc_negState;
	unsigned char *ptr;
	spnego_gss_ctx_id_t sc;
	gss_OID supportedMech = GSS_C_NO_OID;

	sc = (spnego_gss_ctx_id_t)*ctx;
	*negState = REJECT;
	*tokflag = ERROR_TOKEN_SEND;

	ptr = buf->value;
	ret = get_negTokenResp(minor_status, ptr, buf->length,
			       &acc_negState, &supportedMech,
			       responseToken, mechListMIC);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;
	if (acc_negState == ACCEPT_DEFECTIVE_TOKEN &&
	    supportedMech == GSS_C_NO_OID &&
	    *responseToken == GSS_C_NO_BUFFER &&
	    *mechListMIC == GSS_C_NO_BUFFER) {
		/* Reject "empty" token. */
		ret = GSS_S_DEFECTIVE_TOKEN;
	}
	/*
	 * Bail out on an explicit rejection, and also on the "empty"
	 * defective token detected above.  Previously only REJECT was
	 * checked here, so the defective-token result could be
	 * overwritten by the state logic below and negotiation would
	 * continue on a bogus token.
	 */
	if (acc_negState == REJECT || ret == GSS_S_DEFECTIVE_TOKEN) {
		*minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
		map_errcode(minor_status);
		*tokflag = NO_TOKEN_SEND;
		ret = GSS_S_FAILURE;
		goto cleanup;
	}
	/*
	 * nego_done is false for the first call to init_ctx_cont()
	 */
	if (!sc->nego_done) {
		ret = init_ctx_nego(minor_status, sc,
				    acc_negState,
				    supportedMech, responseToken,
				    mechListMIC,
				    negState, tokflag);
	} else if ((!sc->mech_complete && *responseToken == GSS_C_NO_BUFFER) ||
		   (sc->mech_complete && *responseToken != GSS_C_NO_BUFFER)) {
		/* Missing or spurious token from acceptor. */
		ret = GSS_S_DEFECTIVE_TOKEN;
	} else if (!sc->mech_complete ||
		   (sc->mic_reqd &&
		    (sc->ctx_flags & GSS_C_INTEG_FLAG))) {
		/* Not obviously done; we may decide we're done later in
		 * init_ctx_call_init or handle_mic. */
		*negState = ACCEPT_INCOMPLETE;
		*tokflag = CONT_TOKEN_SEND;
		ret = GSS_S_CONTINUE_NEEDED;
	} else {
		/* mech finished on last pass and no MIC required, so done. */
		*negState = ACCEPT_COMPLETE;
		*tokflag = NO_TOKEN_SEND;
		ret = GSS_S_COMPLETE;
	}
cleanup:
	if (supportedMech != GSS_C_NO_OID)
		generic_gss_release_oid(&tmpmin, &supportedMech);
	return ret;
}
/*
* Consistency checking and mechanism negotiation handling for second
* call of spnego_gss_init_sec_context(). Call init_ctx_reselect() to
* update internal state if acceptor has counter-proposed.
*/
static OM_uint32
init_ctx_nego(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc,
	      OM_uint32 acc_negState, gss_OID supportedMech,
	      gss_buffer_t *responseToken, gss_buffer_t *mechListMIC,
	      OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret;

	/* Defaults: treat the token as defective unless proven otherwise. */
	*negState = REJECT;
	*tokflag = ERROR_TOKEN_SEND;
	ret = GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Both supportedMech and negState must be present in first
	 * acceptor token.
	 */
	if (supportedMech == GSS_C_NO_OID) {
		*minor_status = ERR_SPNEGO_NO_MECH_FROM_ACCEPTOR;
		map_errcode(minor_status);
		return GSS_S_DEFECTIVE_TOKEN;
	}
	if (acc_negState == ACCEPT_DEFECTIVE_TOKEN) {
		*minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
		map_errcode(minor_status);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	/*
	 * If the mechanism we sent is not the mechanism returned from
	 * the server, we need to handle the server's counter
	 * proposal. There is a bug in SAMBA servers that always send
	 * the old Kerberos mech OID, even though we sent the new one.
	 * So we will treat all the Kerberos mech OIDS as the same.
	 */
	if (!(is_kerb_mech(supportedMech) &&
	      is_kerb_mech(sc->internal_mech)) &&
	    !g_OID_equal(supportedMech, sc->internal_mech)) {
		/* Acceptor counter-proposed; restart with the new mech. */
		ret = init_ctx_reselect(minor_status, sc,
					acc_negState, supportedMech,
					responseToken, mechListMIC,
					negState, tokflag);

	} else if (*responseToken == GSS_C_NO_BUFFER) {

		if (sc->mech_complete) {
			/*
			 * Mech completed on first call to its
			 * init_sec_context().  Acceptor sends no mech
			 * token.
			 */
			*negState = ACCEPT_COMPLETE;
			*tokflag = NO_TOKEN_SEND;
			ret = GSS_S_COMPLETE;
		} else {
			/*
			 * Reject missing mech token when optimistic
			 * mech selected.
			 */
			*minor_status = ERR_SPNEGO_NO_TOKEN_FROM_ACCEPTOR;
			map_errcode(minor_status);
			ret = GSS_S_DEFECTIVE_TOKEN;
		}
	} else if ((*responseToken)->length == 0 && sc->mech_complete) {
		/* Handle old IIS servers returning empty token instead of
		 * null tokens in the non-mutual auth case. */
		*negState = ACCEPT_COMPLETE;
		*tokflag = NO_TOKEN_SEND;
		ret = GSS_S_COMPLETE;
	} else if (sc->mech_complete) {
		/* Reject spurious mech token. */
		ret = GSS_S_DEFECTIVE_TOKEN;
	} else {
		/* Normal continuation: feed the mech token to the mech. */
		*negState = ACCEPT_INCOMPLETE;
		*tokflag = CONT_TOKEN_SEND;
		ret = GSS_S_CONTINUE_NEEDED;
	}
	/* Negotiation phase is over regardless of outcome. */
	sc->nego_done = 1;
	return ret;
}
/*
* Handle acceptor's counter-proposal of an alternative mechanism.
*/
static OM_uint32
init_ctx_reselect(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc,
		  OM_uint32 acc_negState, gss_OID supportedMech,
		  gss_buffer_t *responseToken, gss_buffer_t *mechListMIC,
		  OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 tmpmin;
	size_t i;

	/* Discard the optimistic mech's partially established context. */
	gss_delete_sec_context(&tmpmin, &sc->ctx_handle,
			       GSS_C_NO_BUFFER);

	/* Find supportedMech in sc->mech_set. */
	for (i = 0; i < sc->mech_set->count; i++) {
		if (g_OID_equal(supportedMech, &sc->mech_set->elements[i]))
			break;
	}
	/* The acceptor must choose a mech we actually proposed. */
	if (i == sc->mech_set->count)
		return GSS_S_DEFECTIVE_TOKEN;
	/* Alias into our own mech_set, so no separate copy to free. */
	sc->internal_mech = &sc->mech_set->elements[i];

	/*
	 * Windows 2003 and earlier don't correctly send a
	 * negState of request-mic when counter-proposing a
	 * mechanism.  They probably don't handle mechListMICs
	 * properly either.
	 */
	if (acc_negState != REQUEST_MIC)
		return GSS_S_DEFECTIVE_TOKEN;

	/* Restart the mech exchange and require a MIC exchange, since the
	 * mech list was effectively modified by the counter-proposal. */
	sc->mech_complete = 0;
	sc->mic_reqd = 1;
	*negState = REQUEST_MIC;
	*tokflag = CONT_TOKEN_SEND;
	return GSS_S_CONTINUE_NEEDED;
}
/*
* Wrap call to mechanism gss_init_sec_context() and update state
* accordingly.
*/
static OM_uint32
init_ctx_call_init(OM_uint32 *minor_status,
		   spnego_gss_ctx_id_t sc,
		   spnego_gss_cred_id_t spcred,
		   gss_name_t target_name,
		   OM_uint32 req_flags,
		   OM_uint32 time_req,
		   gss_buffer_t mechtok_in,
		   gss_OID *actual_mech,
		   gss_buffer_t mechtok_out,
		   OM_uint32 *ret_flags,
		   OM_uint32 *time_rec,
		   OM_uint32 *negState,
		   send_token_flag *send_token)
{
	OM_uint32 ret, tmpret, tmpmin;
	gss_cred_id_t mcred;

	mcred = (spcred == NULL) ? GSS_C_NO_CREDENTIAL : spcred->mcred;
	/* GSS_C_INTEG_FLAG is forced on so a mechListMIC can be computed
	 * if the exchange ends up requiring one. */
	ret = gss_init_sec_context(minor_status,
				   mcred,
				   &sc->ctx_handle,
				   target_name,
				   sc->internal_mech,
				   (req_flags | GSS_C_INTEG_FLAG),
				   time_req,
				   GSS_C_NO_CHANNEL_BINDINGS,
				   mechtok_in,
				   &sc->actual_mech,
				   mechtok_out,
				   &sc->ctx_flags,
				   time_rec);
	if (ret == GSS_S_COMPLETE) {
		sc->mech_complete = 1;
		if (ret_flags != NULL)
			*ret_flags = sc->ctx_flags;
		/*
		 * Microsoft SPNEGO implementations expect an even number of
		 * token exchanges.  So if we're sending a final token, ask for
		 * a zero-length token back from the server.  Also ask for a
		 * token back if this is the first token or if a MIC exchange
		 * is required.
		 */
		if (*send_token == CONT_TOKEN_SEND &&
		    mechtok_out->length == 0 &&
		    (!sc->mic_reqd ||
		     !(sc->ctx_flags & GSS_C_INTEG_FLAG))) {
			/* The exchange is complete. */
			*negState = ACCEPT_COMPLETE;
			ret = GSS_S_COMPLETE;
			*send_token = NO_TOKEN_SEND;
		} else {
			/* Ask for one more hop. */
			*negState = ACCEPT_INCOMPLETE;
			ret = GSS_S_CONTINUE_NEEDED;
		}
		return ret;
	}

	if (ret == GSS_S_CONTINUE_NEEDED)
		return ret;

	/* Hard error from the mech.  Past the first token we must abort. */
	if (*send_token != INIT_TOKEN_SEND) {
		*send_token = ERROR_TOKEN_SEND;
		*negState = REJECT;
		return ret;
	}

	/*
	 * Since this is the first token, we can fall back to later mechanisms
	 * in the list.  Since the mechanism list is expected to be short, we
	 * can do this with recursion.  If all mechanisms produce errors, the
	 * caller should get the error from the first mech in the list.
	 */
	/* Remove the failed first mech: free its OID storage, then shift the
	 * remaining elements down one slot. */
	gssalloc_free(sc->mech_set->elements->elements);
	memmove(sc->mech_set->elements, sc->mech_set->elements + 1,
		--sc->mech_set->count * sizeof(*sc->mech_set->elements));
	if (sc->mech_set->count == 0)
		goto fail;
	/* Re-encode the shortened mech list; the MIC must cover what we
	 * actually send. */
	gss_release_buffer(&tmpmin, &sc->DER_mechTypes);
	if (put_mech_set(sc->mech_set, &sc->DER_mechTypes) < 0)
		goto fail;
	tmpret = init_ctx_call_init(&tmpmin, sc, spcred, target_name,
				    req_flags, time_req, mechtok_in,
				    actual_mech, mechtok_out, ret_flags,
				    time_rec, negState, send_token);
	if (HARD_ERROR(tmpret))
		goto fail;
	*minor_status = tmpmin;
	return tmpret;

fail:
	/* Don't output token on error from first call. */
	/* Note: "ret" still holds the first mech's error, as promised. */
	*send_token = NO_TOKEN_SEND;
	*negState = REJECT;
	return ret;
}
/*ARGSUSED*/
OM_uint32 KRB5_CALLCONV
spnego_gss_init_sec_context(
			OM_uint32 *minor_status,
			gss_cred_id_t claimant_cred_handle,
			gss_ctx_id_t *context_handle,
			gss_name_t target_name,
			gss_OID mech_type,
			OM_uint32 req_flags,
			OM_uint32 time_req,
			gss_channel_bindings_t input_chan_bindings,
			gss_buffer_t input_token,
			gss_OID *actual_mech,
			gss_buffer_t output_token,
			OM_uint32 *ret_flags,
			OM_uint32 *time_rec)
{
	send_token_flag send_token = NO_TOKEN_SEND;
	OM_uint32 tmpmin, ret, negState;
	gss_buffer_t mechtok_in, mechListMIC_in, mechListMIC_out;
	gss_buffer_desc mechtok_out = GSS_C_EMPTY_BUFFER;
	spnego_gss_cred_id_t spcred = NULL;
	spnego_gss_ctx_id_t spnego_ctx = NULL;

	dsyslog("Entering init_sec_context\n");

	mechtok_in = mechListMIC_out = mechListMIC_in = GSS_C_NO_BUFFER;
	negState = REJECT;

	/*
	 * This function works in three steps:
	 *
	 *   1. Perform mechanism negotiation.
	 *   2. Invoke the negotiated or optimistic mech's gss_init_sec_context
	 *      function and examine the results.
	 *   3. Process or generate MICs if necessary.
	 *
	 * The three steps share responsibility for determining when the
	 * exchange is complete.  If the selected mech completed in a previous
	 * call and no MIC exchange is expected, then step 1 will decide.  If
	 * the selected mech completes in this call and no MIC exchange is
	 * expected, then step 2 will decide.  If a MIC exchange is expected,
	 * then step 3 will decide.  If an error occurs in any step, the
	 * exchange will be aborted, possibly with an error token.
	 *
	 * negState determines the state of the negotiation, and is
	 * communicated to the acceptor if a continuing token is sent.
	 * send_token is used to indicate what type of token, if any, should be
	 * generated.
	 */

	/* Validate arguments. */
	if (minor_status != NULL)
		*minor_status = 0;
	if (output_token != GSS_C_NO_BUFFER) {
		output_token->length = 0;
		output_token->value = NULL;
	}
	if (minor_status == NULL ||
	    output_token == GSS_C_NO_BUFFER ||
	    context_handle == NULL)
		return GSS_S_CALL_INACCESSIBLE_WRITE;

	if (actual_mech != NULL)
		*actual_mech = GSS_C_NO_OID;

	/* Step 1: perform mechanism negotiation. */
	spcred = (spnego_gss_cred_id_t)claimant_cred_handle;
	if (*context_handle == GSS_C_NO_CONTEXT) {
		ret = init_ctx_new(minor_status, spcred,
				   context_handle, &send_token);
		if (ret != GSS_S_CONTINUE_NEEDED) {
			goto cleanup;
		}
	} else {
		ret = init_ctx_cont(minor_status, context_handle,
				    input_token, &mechtok_in,
				    &mechListMIC_in, &negState, &send_token);
		if (HARD_ERROR(ret)) {
			goto cleanup;
		}
	}

	/* Step 2: invoke the selected or optimistic mechanism's
	 * gss_init_sec_context function, if it didn't complete previously. */
	spnego_ctx = (spnego_gss_ctx_id_t)*context_handle;
	if (!spnego_ctx->mech_complete) {
		ret = init_ctx_call_init(
			minor_status, spnego_ctx, spcred,
			target_name, req_flags,
			time_req, mechtok_in,
			actual_mech, &mechtok_out,
			ret_flags, time_rec,
			&negState, &send_token);
	}

	/* Step 3: process or generate the MIC, if the negotiated mech is
	 * complete and supports MICs. */
	if (!HARD_ERROR(ret) && spnego_ctx->mech_complete &&
	    (spnego_ctx->ctx_flags & GSS_C_INTEG_FLAG)) {
		ret = handle_mic(minor_status,
				 mechListMIC_in,
				 (mechtok_out.length != 0),
				 spnego_ctx, &mechListMIC_out,
				 &negState, &send_token);
	}
cleanup:
	/* Package either a NegTokenInit (first leg) or NegTokenResp. */
	if (send_token == INIT_TOKEN_SEND) {
		if (make_spnego_tokenInit_msg(spnego_ctx,
					      0,
					      mechListMIC_out,
					      req_flags,
					      &mechtok_out, send_token,
					      output_token) < 0) {
			ret = GSS_S_FAILURE;
		}
	} else if (send_token != NO_TOKEN_SEND) {
		if (make_spnego_tokenTarg_msg(negState, GSS_C_NO_OID,
					      &mechtok_out, mechListMIC_out,
					      send_token,
					      output_token) < 0) {
			ret = GSS_S_FAILURE;
		}
	}
	gss_release_buffer(&tmpmin, &mechtok_out);
	if (ret == GSS_S_COMPLETE) {
		/*
		 * Now, switch the output context to refer to the
		 * negotiated mechanism's context.
		 */
		*context_handle = (gss_ctx_id_t)spnego_ctx->ctx_handle;
		if (actual_mech != NULL)
			*actual_mech = spnego_ctx->actual_mech;
		if (ret_flags != NULL)
			*ret_flags = spnego_ctx->ctx_flags;
		release_spnego_ctx(&spnego_ctx);
	} else if (ret != GSS_S_CONTINUE_NEEDED) {
		/* Hard failure: tear down both mech and SPNEGO contexts. */
		if (spnego_ctx != NULL) {
			gss_delete_sec_context(&tmpmin,
					       &spnego_ctx->ctx_handle,
					       GSS_C_NO_BUFFER);
			release_spnego_ctx(&spnego_ctx);
		}
		*context_handle = GSS_C_NO_CONTEXT;
	}
	/* These buffers were heap-allocated by the parser; free container
	 * and contents. */
	if (mechtok_in != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mechtok_in);
		free(mechtok_in);
	}
	if (mechListMIC_in != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mechListMIC_in);
		free(mechListMIC_in);
	}
	if (mechListMIC_out != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mechListMIC_out);
		free(mechListMIC_out);
	}
	return ret;
} /* init_sec_context */
/* We don't want to import KRB5 headers here */
/* Standard krb5 mech OID: 1.2.840.113554.1.2.2. */
static const gss_OID_desc gss_mech_krb5_oid =
	{ 9, "\052\206\110\206\367\022\001\002\002" };
/* The erroneous krb5 OID emitted by legacy Microsoft implementations
 * (one arc differs); treated as equivalent during negotiation. */
static const gss_OID_desc gss_mech_krb5_wrong_oid =
	{ 9, "\052\206\110\202\367\022\001\002\002" };
/*
* verify that the input token length is not 0. If it is, just return.
* If the token length is greater than 0, der encode as a sequence
* and place in buf_out, advancing buf_out.
*/
static int
put_neg_hints(unsigned char **buf_out, gss_buffer_t input_token,
	      unsigned int buflen)
{
	int ret;

	/* if token length is 0, we do not want to send */
	if (input_token->length == 0)
		return (0);

	if (input_token->length > buflen)
		return (-1);

	*(*buf_out)++ = SEQUENCE;
	/* NOTE(review): the third argument is the space bound for the DER
	 * length encoding; input_token->length is passed here rather than
	 * the remaining buffer space -- assumed adequate, verify against
	 * gssint_put_der_length's contract. */
	if ((ret = gssint_put_der_length(input_token->length, buf_out,
					 input_token->length)))
		return (ret);
	/* TWRITE_STR copies the payload and advances *buf_out. */
	TWRITE_STR(*buf_out, input_token->value, input_token->length);
	return (0);
}
/*
* NegHints ::= SEQUENCE {
* hintName [0] GeneralString OPTIONAL,
* hintAddress [1] OCTET STRING OPTIONAL
* }
*/
#define	HOST_PREFIX	"host@"
#define	HOST_PREFIX_LEN	(sizeof(HOST_PREFIX) - 1)

/*
 * Build a DER-encoded NegHints hintName for a Microsoft-compatible
 * NegTokenInit2.  The hint is the display form of the acceptor
 * credential's name (or "host@<localhost>" if no credential was given),
 * canonicalized through the krb5 mech.
 *
 * On success *outbuf is a malloc'd gss_buffer_t whose value holds the
 * encoding (caller releases buffer contents with gss_release_buffer and
 * frees the container).  On failure *outbuf is GSS_C_NO_BUFFER.
 */
static int
make_NegHints(OM_uint32 *minor_status,
	      spnego_gss_cred_id_t spcred, gss_buffer_t *outbuf)
{
	gss_buffer_desc hintNameBuf;
	gss_name_t hintName = GSS_C_NO_NAME;
	gss_name_t hintKerberosName;
	gss_OID hintNameType;
	OM_uint32 major_status;
	OM_uint32 minor;
	unsigned int tlen = 0;
	unsigned int hintNameSize = 0;
	unsigned char *ptr;
	unsigned char *t;

	*outbuf = GSS_C_NO_BUFFER;

	if (spcred != NULL) {
		major_status = gss_inquire_cred(minor_status,
						spcred->mcred,
						&hintName,
						NULL,
						NULL,
						NULL);
		if (major_status != GSS_S_COMPLETE)
			return (major_status);
	}

	if (hintName == GSS_C_NO_NAME) {
		/* No credential name; fall back to host@<local hostname>. */
		krb5_error_code code;
		krb5int_access kaccess;
		char hostname[HOST_PREFIX_LEN + MAXHOSTNAMELEN + 1] = HOST_PREFIX;

		code = krb5int_accessor(&kaccess, KRB5INT_ACCESS_VERSION);
		if (code != 0) {
			*minor_status = code;
			return (GSS_S_FAILURE);
		}

		/* this breaks mutual authentication but Samba relies on it */
		code = (*kaccess.clean_hostname)(NULL, NULL,
						 &hostname[HOST_PREFIX_LEN],
						 MAXHOSTNAMELEN);
		if (code != 0) {
			*minor_status = code;
			return (GSS_S_FAILURE);
		}

		hintNameBuf.value = hostname;
		hintNameBuf.length = strlen(hostname);

		major_status = gss_import_name(minor_status,
					       &hintNameBuf,
					       GSS_C_NT_HOSTBASED_SERVICE,
					       &hintName);
		if (major_status != GSS_S_COMPLETE) {
			return (major_status);
		}
	}

	hintNameBuf.value = NULL;
	hintNameBuf.length = 0;

	major_status = gss_canonicalize_name(minor_status,
					     hintName,
					     (gss_OID)&gss_mech_krb5_oid,
					     &hintKerberosName);
	if (major_status != GSS_S_COMPLETE) {
		gss_release_name(&minor, &hintName);
		return (major_status);
	}
	gss_release_name(&minor, &hintName);

	major_status = gss_display_name(minor_status,
					hintKerberosName,
					&hintNameBuf,
					&hintNameType);
	if (major_status != GSS_S_COMPLETE) {
		/* Release the canonicalized name; hintName was already
		 * released above (the old code re-released hintName here,
		 * leaking hintKerberosName). */
		gss_release_name(&minor, &hintKerberosName);
		return (major_status);
	}
	gss_release_name(&minor, &hintKerberosName);

	/*
	 * Now encode the name hint into a NegHints ASN.1 type
	 */
	major_status = GSS_S_FAILURE;

	/* Length of DER encoded GeneralString */
	tlen = 1 + gssint_der_length_size(hintNameBuf.length) +
		hintNameBuf.length;
	hintNameSize = tlen;

	/* Length of DER encoded hintName */
	tlen += 1 + gssint_der_length_size(hintNameSize);

	t = gssalloc_malloc(tlen);
	if (t == NULL) {
		*minor_status = ENOMEM;
		goto errout;
	}

	ptr = t;

	*ptr++ = CONTEXT | 0x00; /* hintName identifier */
	if (gssint_put_der_length(hintNameSize,
				  &ptr, tlen - (int)(ptr-t)))
		goto errout;

	*ptr++ = GENERAL_STRING;
	if (gssint_put_der_length(hintNameBuf.length,
				  &ptr, tlen - (int)(ptr-t)))
		goto errout;

	memcpy(ptr, hintNameBuf.value, hintNameBuf.length);
	ptr += hintNameBuf.length;

	*outbuf = (gss_buffer_t)malloc(sizeof(gss_buffer_desc));
	if (*outbuf == NULL) {
		*minor_status = ENOMEM;
		goto errout;
	}
	(*outbuf)->value = (void *)t;
	(*outbuf)->length = ptr - t;

	t = NULL; /* don't free */

	*minor_status = 0;
	major_status = GSS_S_COMPLETE;

errout:
	/* t was allocated with gssalloc_malloc, so release it with the
	 * matching gssalloc_free (plain free() mismatches on Windows). */
	if (t != NULL)
		gssalloc_free(t);

	gss_release_buffer(&minor, &hintNameBuf);

	return (major_status);
}
/*
* Support the Microsoft NegHints extension to SPNEGO for compatibility with
* some versions of Samba. See:
* http://msdn.microsoft.com/en-us/library/cc247039(PROT.10).aspx
*/
static OM_uint32
acc_ctx_hints(OM_uint32 *minor_status,
	      gss_ctx_id_t *ctx,
	      spnego_gss_cred_id_t spcred,
	      gss_buffer_t *mechListMIC,
	      OM_uint32 *negState,
	      send_token_flag *return_token)
{
	OM_uint32 tmpmin, ret;
	gss_OID_set supported_mechSet;
	spnego_gss_ctx_id_t sc = NULL;

	*mechListMIC = GSS_C_NO_BUFFER;
	supported_mechSet = GSS_C_NO_OID_SET;
	*return_token = NO_TOKEN_SEND;
	*negState = REJECT;
	*minor_status = 0;

	/* A hint request must be the first token received. */
	if (*ctx != GSS_C_NO_CONTEXT)
		return GSS_S_DEFECTIVE_TOKEN;

	ret = get_negotiable_mechs(minor_status, spcred, GSS_C_ACCEPT,
				   &supported_mechSet);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;

	/* The NegHints blob travels in the mechListMIC field of the reply. */
	ret = make_NegHints(minor_status, spcred, mechListMIC);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;

	sc = create_spnego_ctx();
	if (sc == NULL) {
		ret = GSS_S_FAILURE;
		goto cleanup;
	}
	if (put_mech_set(supported_mechSet, &sc->DER_mechTypes) < 0) {
		ret = GSS_S_FAILURE;
		goto cleanup;
	}
	/* No mech chosen yet; acc_ctx_new() will negotiate on the real
	 * initiator token. */
	sc->internal_mech = GSS_C_NO_OID;

	*negState = ACCEPT_INCOMPLETE;
	*return_token = INIT_TOKEN_SEND;
	sc->firstpass = 1;
	*ctx = (gss_ctx_id_t)sc;
	sc = NULL;		/* ownership transferred to *ctx */
	ret = GSS_S_COMPLETE;

cleanup:
	release_spnego_ctx(&sc);
	gss_release_oid_set(&tmpmin, &supported_mechSet);
	return ret;
}
/*
* Set negState to REJECT if the token is defective, else
* ACCEPT_INCOMPLETE or REQUEST_MIC, depending on whether initiator's
* preferred mechanism is supported.
*/
static OM_uint32
acc_ctx_new(OM_uint32 *minor_status,
	    gss_buffer_t buf,
	    gss_ctx_id_t *ctx,
	    spnego_gss_cred_id_t spcred,
	    gss_buffer_t *mechToken,
	    gss_buffer_t *mechListMIC,
	    OM_uint32 *negState,
	    send_token_flag *return_token)
{
	OM_uint32 tmpmin, ret, req_flags;
	gss_OID_set supported_mechSet, mechTypes;
	gss_buffer_desc der_mechTypes;
	gss_OID mech_wanted;
	spnego_gss_ctx_id_t sc = NULL;

	ret = GSS_S_DEFECTIVE_TOKEN;
	der_mechTypes.length = 0;
	der_mechTypes.value = NULL;
	*mechToken = *mechListMIC = GSS_C_NO_BUFFER;
	supported_mechSet = mechTypes = GSS_C_NO_OID_SET;
	*return_token = ERROR_TOKEN_SEND;
	*negState = REJECT;
	*minor_status = 0;

	/* Parse the initiator's NegTokenInit; der_mechTypes keeps the raw
	 * DER of the mech list for later MIC verification. */
	ret = get_negTokenInit(minor_status, buf, &der_mechTypes,
			       &mechTypes, &req_flags,
			       mechToken, mechListMIC);
	if (ret != GSS_S_COMPLETE) {
		goto cleanup;
	}
	ret = get_negotiable_mechs(minor_status, spcred, GSS_C_ACCEPT,
				   &supported_mechSet);
	if (ret != GSS_S_COMPLETE) {
		*return_token = NO_TOKEN_SEND;
		goto cleanup;
	}
	/*
	 * Select the best match between the list of mechs
	 * that the initiator requested and the list that
	 * the acceptor will support.
	 */
	mech_wanted = negotiate_mech(supported_mechSet, mechTypes, negState);
	if (*negState == REJECT) {
		ret = GSS_S_BAD_MECH;
		goto cleanup;
	}
	/* A context may already exist if we sent NegHints first. */
	sc = (spnego_gss_ctx_id_t)*ctx;
	if (sc != NULL) {
		/* Discard the hints-era mech list encoding. */
		gss_release_buffer(&tmpmin, &sc->DER_mechTypes);
		assert(mech_wanted != GSS_C_NO_OID);
	} else
		sc = create_spnego_ctx();
	if (sc == NULL) {
		ret = GSS_S_FAILURE;
		*return_token = NO_TOKEN_SEND;
		goto cleanup;
	}
	/* Transfer ownership of the parsed mech list and its DER bytes
	 * into the context. */
	sc->mech_set = mechTypes;
	mechTypes = GSS_C_NO_OID_SET;
	sc->internal_mech = mech_wanted;
	sc->DER_mechTypes = der_mechTypes;
	der_mechTypes.length = 0;
	der_mechTypes.value = NULL;

	if (*negState == REQUEST_MIC)
		sc->mic_reqd = 1;

	*return_token = INIT_TOKEN_SEND;
	sc->firstpass = 1;
	*ctx = (gss_ctx_id_t)sc;
	ret = GSS_S_COMPLETE;

cleanup:
	gss_release_oid_set(&tmpmin, &mechTypes);
	gss_release_oid_set(&tmpmin, &supported_mechSet);
	if (der_mechTypes.length != 0)
		gss_release_buffer(&tmpmin, &der_mechTypes);
	return ret;
}
static OM_uint32
acc_ctx_cont(OM_uint32 *minstat,
	     gss_buffer_t buf,
	     gss_ctx_id_t *ctx,
	     gss_buffer_t *responseToken,
	     gss_buffer_t *mechListMIC,
	     OM_uint32 *negState,
	     send_token_flag *return_token)
{
	OM_uint32 ret, tmpmin;
	gss_OID supportedMech;
	spnego_gss_ctx_id_t sc;
	unsigned int len;
	unsigned char *ptr, *bufstart;

	sc = (spnego_gss_ctx_id_t)*ctx;
	ret = GSS_S_DEFECTIVE_TOKEN;
	*negState = REJECT;
	*minstat = 0;
	supportedMech = GSS_C_NO_OID;
	*return_token = ERROR_TOKEN_SEND;
	*responseToken = *mechListMIC = GSS_C_NO_BUFFER;

	ptr = bufstart = buf->value;
#define REMAIN (buf->length - (ptr - bufstart))
	/* Reject empty or absurdly large tokens before dereferencing ptr
	 * (empty tokens previously caused a null/OOB read here). */
	if (REMAIN == 0 || REMAIN > INT_MAX)
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Attempt to work with old Sun SPNEGO.
	 */
	if (*ptr == HEADER_ID) {
		ret = g_verify_token_header(gss_mech_spnego,
					    &len, &ptr, 0, REMAIN);
		if (ret) {
			*minstat = ret;
			return GSS_S_DEFECTIVE_TOKEN;
		}
	}
	/* A continuation token must be a NegTokenResp ([1] context tag). */
	if (*ptr != (CONTEXT | 0x01)) {
		return GSS_S_DEFECTIVE_TOKEN;
	}
	ret = get_negTokenResp(minstat, ptr, REMAIN,
			       negState, &supportedMech,
			       responseToken, mechListMIC);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;

	/* An empty continuation token carries nothing to act on. */
	if (*responseToken == GSS_C_NO_BUFFER &&
	    *mechListMIC == GSS_C_NO_BUFFER) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto cleanup;
	}
	/* supportedMech is only legal in the acceptor's own replies. */
	if (supportedMech != GSS_C_NO_OID) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto cleanup;
	}
	sc->firstpass = 0;
	*negState = ACCEPT_INCOMPLETE;
	*return_token = CONT_TOKEN_SEND;
cleanup:
	if (supportedMech != GSS_C_NO_OID) {
		generic_gss_release_oid(&tmpmin, &supportedMech);
	}
	return ret;
#undef REMAIN
}
/*
* Verify that mech OID is either exactly the same as the negotiated
* mech OID, or is a mech OID supported by the negotiated mech. MS
* implementations can list a most preferred mech using an incorrect
* krb5 OID while emitting a krb5 initiator mech token having the
* correct krb5 mech OID.
*/
static OM_uint32
acc_ctx_vfy_oid(OM_uint32 *minor_status,
		spnego_gss_ctx_id_t sc, gss_OID mechoid,
		OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret, tmpmin;
	gss_mechanism mech = NULL;
	gss_OID_set mech_set = GSS_C_NO_OID_SET;
	int present = 0;

	/* Exact match with the negotiated mech: nothing more to check. */
	if (g_OID_equal(sc->internal_mech, mechoid))
		return GSS_S_COMPLETE;

	/* Otherwise the token's OID must at least be one the negotiated
	 * mech itself advertises (covers the MS wrong-krb5-OID case). */
	mech = gssint_get_mechanism(sc->internal_mech);
	if (mech == NULL || mech->gss_indicate_mechs == NULL) {
		*minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
		map_errcode(minor_status);
		*negState = REJECT;
		*tokflag = ERROR_TOKEN_SEND;
		return GSS_S_BAD_MECH;
	}

	ret = mech->gss_indicate_mechs(minor_status, &mech_set);
	if (ret != GSS_S_COMPLETE) {
		*tokflag = NO_TOKEN_SEND;
		map_error(minor_status, mech);
		goto cleanup;
	}

	ret = gss_test_oid_set_member(minor_status, mechoid,
				      mech_set, &present);
	if (ret != GSS_S_COMPLETE)
		goto cleanup;

	if (!present) {
		*minor_status = ERR_SPNEGO_NEGOTIATION_FAILED;
		map_errcode(minor_status);
		*negState = REJECT;
		*tokflag = ERROR_TOKEN_SEND;
		ret = GSS_S_BAD_MECH;
	}

cleanup:
	gss_release_oid_set(&tmpmin, &mech_set);
	return ret;
}
#ifndef LEAN_CLIENT
/*
* Wrap call to gss_accept_sec_context() and update state
* accordingly.
*/
static OM_uint32
acc_ctx_call_acc(OM_uint32 *minor_status, spnego_gss_ctx_id_t sc,
		 spnego_gss_cred_id_t spcred, gss_buffer_t mechtok_in,
		 gss_OID *mech_type, gss_buffer_t mechtok_out,
		 OM_uint32 *ret_flags, OM_uint32 *time_rec,
		 gss_cred_id_t *delegated_cred_handle,
		 OM_uint32 *negState, send_token_flag *tokflag)
{
	OM_uint32 ret;
	gss_OID_desc mechoid;
	gss_cred_id_t mcred;

	if (sc->ctx_handle == GSS_C_NO_CONTEXT) {
		/*
		 * mechoid is an alias; don't free it.
		 */
		/* First mech token: confirm its OID matches (or is accepted
		 * by) the mechanism we negotiated. */
		ret = gssint_get_mech_type(&mechoid, mechtok_in);
		if (ret != GSS_S_COMPLETE) {
			*tokflag = NO_TOKEN_SEND;
			return ret;
		}
		ret = acc_ctx_vfy_oid(minor_status, sc, &mechoid,
				      negState, tokflag);
		if (ret != GSS_S_COMPLETE)
			return ret;
	}

	mcred = (spcred == NULL) ? GSS_C_NO_CREDENTIAL : spcred->mcred;
	ret = gss_accept_sec_context(minor_status,
				     &sc->ctx_handle,
				     mcred,
				     mechtok_in,
				     GSS_C_NO_CHANNEL_BINDINGS,
				     &sc->internal_name,
				     mech_type,
				     mechtok_out,
				     &sc->ctx_flags,
				     time_rec,
				     delegated_cred_handle);
	if (ret == GSS_S_COMPLETE) {
#ifdef MS_BUG_TEST
		/*
		 * Force MIC to be not required even if we previously
		 * requested a MIC.
		 */
		char *envstr = getenv("MS_FORCE_NO_MIC");

		if (envstr != NULL && strcmp(envstr, "1") == 0 &&
		    !(sc->ctx_flags & GSS_C_MUTUAL_FLAG) &&
		    sc->mic_reqd) {
			sc->mic_reqd = 0;
		}
#endif
		sc->mech_complete = 1;
		if (ret_flags != NULL)
			*ret_flags = sc->ctx_flags;

		if (!sc->mic_reqd ||
		    !(sc->ctx_flags & GSS_C_INTEG_FLAG)) {
			/* No MIC exchange required, so we're done. */
			*negState = ACCEPT_COMPLETE;
			ret = GSS_S_COMPLETE;
		} else {
			/* handle_mic will decide if we're done. */
			ret = GSS_S_CONTINUE_NEEDED;
		}
	} else if (ret != GSS_S_CONTINUE_NEEDED) {
		/* Hard mech error: reject and emit an error token. */
		*negState = REJECT;
		*tokflag = ERROR_TOKEN_SEND;
	}
	return ret;
}
/*ARGSUSED*/
OM_uint32 KRB5_CALLCONV
spnego_gss_accept_sec_context(
			    OM_uint32 *minor_status,
			    gss_ctx_id_t *context_handle,
			    gss_cred_id_t verifier_cred_handle,
			    gss_buffer_t input_token,
			    gss_channel_bindings_t input_chan_bindings,
			    gss_name_t *src_name,
			    gss_OID *mech_type,
			    gss_buffer_t output_token,
			    OM_uint32 *ret_flags,
			    OM_uint32 *time_rec,
			    gss_cred_id_t *delegated_cred_handle)
{
	OM_uint32 ret, tmpmin, negState;
	send_token_flag return_token;
	gss_buffer_t mechtok_in, mic_in, mic_out;
	gss_buffer_desc mechtok_out = GSS_C_EMPTY_BUFFER;
	spnego_gss_ctx_id_t sc = NULL;
	spnego_gss_cred_id_t spcred = NULL;
	int sendTokenInit = 0, tmpret;

	mechtok_in = mic_in = mic_out = GSS_C_NO_BUFFER;

	/*
	 * This function works in three steps:
	 *
	 *   1. Perform mechanism negotiation.
	 *   2. Invoke the negotiated mech's gss_accept_sec_context function
	 *      and examine the results.
	 *   3. Process or generate MICs if necessary.
	 *
	 * Step one determines whether the negotiation requires a MIC exchange,
	 * while steps two and three share responsibility for determining when
	 * the exchange is complete.  If the selected mech completes in this
	 * call and no MIC exchange is expected, then step 2 will decide.  If a
	 * MIC exchange is expected, then step 3 will decide.  If an error
	 * occurs in any step, the exchange will be aborted, possibly with an
	 * error token.
	 *
	 * negState determines the state of the negotiation, and is
	 * communicated to the acceptor if a continuing token is sent.
	 * return_token is used to indicate what type of token, if any, should
	 * be generated.
	 */

	/* Validate arguments. */
	if (minor_status != NULL)
		*minor_status = 0;
	if (output_token != GSS_C_NO_BUFFER) {
		output_token->length = 0;
		output_token->value = NULL;
	}
	if (minor_status == NULL ||
	    output_token == GSS_C_NO_BUFFER ||
	    context_handle == NULL)
		return GSS_S_CALL_INACCESSIBLE_WRITE;
	if (input_token == GSS_C_NO_BUFFER)
		return GSS_S_CALL_INACCESSIBLE_READ;

	/* Step 1: Perform mechanism negotiation. */
	sc = (spnego_gss_ctx_id_t)*context_handle;
	spcred = (spnego_gss_cred_id_t)verifier_cred_handle;
	if (sc == NULL || sc->internal_mech == GSS_C_NO_OID) {
		/* Process an initial token or request for NegHints. */
		if (src_name != NULL)
			*src_name = GSS_C_NO_NAME;
		if (mech_type != NULL)
			*mech_type = GSS_C_NO_OID;
		if (time_rec != NULL)
			*time_rec = 0;
		if (ret_flags != NULL)
			*ret_flags = 0;
		if (delegated_cred_handle != NULL)
			*delegated_cred_handle = GSS_C_NO_CREDENTIAL;
		if (input_token->length == 0) {
			/* Zero-length token: NegHints request (MS
			 * NegTokenInit2 extension). */
			ret = acc_ctx_hints(minor_status,
					    context_handle, spcred,
					    &mic_out,
					    &negState,
					    &return_token);
			if (ret != GSS_S_COMPLETE)
				goto cleanup;
			sendTokenInit = 1;
			ret = GSS_S_CONTINUE_NEEDED;
		} else {
			/* Can set negState to REQUEST_MIC */
			ret = acc_ctx_new(minor_status, input_token,
					  context_handle, spcred,
					  &mechtok_in, &mic_in,
					  &negState, &return_token);
			if (ret != GSS_S_COMPLETE)
				goto cleanup;
			ret = GSS_S_CONTINUE_NEEDED;
		}
	} else {
		/* Process a response token.  Can set negState to
		 * ACCEPT_INCOMPLETE. */
		ret = acc_ctx_cont(minor_status, input_token,
				   context_handle, &mechtok_in,
				   &mic_in, &negState, &return_token);
		if (ret != GSS_S_COMPLETE)
			goto cleanup;
		ret = GSS_S_CONTINUE_NEEDED;
	}

	/* Step 2: invoke the negotiated mechanism's gss_accept_sec_context
	 * function. */
	sc = (spnego_gss_ctx_id_t)*context_handle;
	/*
	 * Handle mechtok_in and mic_in only if they are
	 * present in input_token.  If neither is present, whether
	 * this is an error depends on whether this is the first
	 * round-trip.  RET is set to a default value according to
	 * whether it is the first round-trip.
	 */
	if (negState != REQUEST_MIC && mechtok_in != GSS_C_NO_BUFFER) {
		ret = acc_ctx_call_acc(minor_status, sc, spcred,
				       mechtok_in, mech_type, &mechtok_out,
				       ret_flags, time_rec,
				       delegated_cred_handle,
				       &negState, &return_token);
	}

	/* Step 3: process or generate the MIC, if the negotiated mech is
	 * complete and supports MICs. */
	if (!HARD_ERROR(ret) && sc->mech_complete &&
	    (sc->ctx_flags & GSS_C_INTEG_FLAG)) {
		ret = handle_mic(minor_status, mic_in,
				 (mechtok_out.length != 0),
				 sc, &mic_out,
				 &negState, &return_token);
	}
cleanup:
	/* NegHints replies go out as a NegTokenInit; everything else as a
	 * NegTokenResp. */
	if (return_token == INIT_TOKEN_SEND && sendTokenInit) {
		assert(sc != NULL);
		tmpret = make_spnego_tokenInit_msg(sc, 1, mic_out, 0,
						   GSS_C_NO_BUFFER,
						   return_token, output_token);
		if (tmpret < 0)
			ret = GSS_S_FAILURE;
	} else if (return_token != NO_TOKEN_SEND &&
		   return_token != CHECK_MIC) {
		tmpret = make_spnego_tokenTarg_msg(negState,
						   sc ? sc->internal_mech :
						   GSS_C_NO_OID,
						   &mechtok_out, mic_out,
						   return_token,
						   output_token);
		if (tmpret < 0)
			ret = GSS_S_FAILURE;
	}
	if (ret == GSS_S_COMPLETE) {
		/* Hand the caller the underlying mech context and the
		 * initiator name; the SPNEGO wrapper is retired. */
		*context_handle = (gss_ctx_id_t)sc->ctx_handle;
		if (sc->internal_name != GSS_C_NO_NAME &&
		    src_name != NULL) {
			*src_name = sc->internal_name;
			sc->internal_name = GSS_C_NO_NAME;
		}
		release_spnego_ctx(&sc);
	} else if (ret != GSS_S_CONTINUE_NEEDED) {
		/* Hard failure: destroy both contexts. */
		if (sc != NULL) {
			gss_delete_sec_context(&tmpmin, &sc->ctx_handle,
					       GSS_C_NO_BUFFER);
			release_spnego_ctx(&sc);
		}
		*context_handle = GSS_C_NO_CONTEXT;
	}
	gss_release_buffer(&tmpmin, &mechtok_out);
	/* Parser-allocated buffers: free contents and containers. */
	if (mechtok_in != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mechtok_in);
		free(mechtok_in);
	}
	if (mic_in != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mic_in);
		free(mic_in);
	}
	if (mic_out != GSS_C_NO_BUFFER) {
		gss_release_buffer(&tmpmin, mic_out);
		free(mic_out);
	}
	return ret;
}
#endif /* LEAN_CLIENT */
/*ARGSUSED*/
OM_uint32 KRB5_CALLCONV
spnego_gss_display_status(
		OM_uint32 *minor_status,
		OM_uint32 status_value,
		int status_type,
		gss_OID mech_type,
		OM_uint32 *message_context,
		gss_buffer_t status_string)
{
	OM_uint32 maj = GSS_S_COMPLETE;
	int ret;

	dsyslog("Entering display_status\n");

	/* All of SPNEGO's messages are single-part. */
	*message_context = 0;
	switch (status_value) {
	    case ERR_SPNEGO_NO_MECHS_AVAILABLE:
		/* CSTYLED */
		*status_string = make_err_msg(_("SPNEGO cannot find "
						"mechanisms to negotiate"));
		break;
	    case ERR_SPNEGO_NO_CREDS_ACQUIRED:
		/* CSTYLED */
		*status_string = make_err_msg(_("SPNEGO failed to acquire "
						"creds"));
		break;
	    case ERR_SPNEGO_NO_MECH_FROM_ACCEPTOR:
		/* CSTYLED */
		*status_string = make_err_msg(_("SPNEGO acceptor did not "
						"select a mechanism"));
		break;
	    case ERR_SPNEGO_NEGOTIATION_FAILED:
		/* CSTYLED */
		*status_string = make_err_msg(_("SPNEGO failed to negotiate a "
						"mechanism"));
		break;
	    case ERR_SPNEGO_NO_TOKEN_FROM_ACCEPTOR:
		/* CSTYLED */
		*status_string = make_err_msg(_("SPNEGO acceptor did not "
						"return a valid token"));
		break;
	    default:
		/* Not one of our minor codes; might be from a mech.  Call back
		 * to gss_display_status, but first check for recursion. */
		if (k5_getspecific(K5_KEY_GSS_SPNEGO_STATUS) != NULL) {
			/* Perhaps we returned a com_err code like ENOMEM. */
			const char *err = error_message(status_value);
			*status_string = make_err_msg(err);
			break;
		}
		/* Set a non-null pointer value; doesn't matter which one.
		 * This thread-specific flag blocks re-entry while the
		 * mechglue is consulted. */
		ret = k5_setspecific(K5_KEY_GSS_SPNEGO_STATUS, &ret);
		if (ret != 0) {
			*minor_status = ret;
			maj = GSS_S_FAILURE;
			break;
		}
		maj = gss_display_status(minor_status, status_value,
					 status_type, mech_type,
					 message_context, status_string);
		/* This is unlikely to fail; not much we can do if it does. */
		(void)k5_setspecific(K5_KEY_GSS_SPNEGO_STATUS, NULL);
		break;
	}
	dsyslog("Leaving display_status\n");
	return maj;
}
/*ARGSUSED*/
/*
 * SPNEGO is a pseudo-mechanism: name handling needs no negotiation
 * state, so importing a name is delegated straight to the mechglue.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_import_name(
		    OM_uint32 *minor_status,
		    gss_buffer_t input_name_buffer,
		    gss_OID input_name_type,
		    gss_name_t *output_name)
{
	OM_uint32 major;

	dsyslog("Entering import_name\n");
	major = gss_import_name(minor_status, input_name_buffer,
				input_name_type, output_name);
	dsyslog("Leaving import_name\n");
	return (major);
}
/*ARGSUSED*/
/* Pass-through: release a mechglue name on SPNEGO's behalf. */
OM_uint32 KRB5_CALLCONV
spnego_gss_release_name(
			OM_uint32 *minor_status,
			gss_name_t *input_name)
{
	OM_uint32 major;

	dsyslog("Entering release_name\n");
	major = gss_release_name(minor_status, input_name);
	dsyslog("Leaving release_name\n");
	return (major);
}
/*ARGSUSED*/
/* Pass-through: duplicate a mechglue name on SPNEGO's behalf. */
OM_uint32 KRB5_CALLCONV
spnego_gss_duplicate_name(
			OM_uint32 *minor_status,
			const gss_name_t input_name,
			gss_name_t *output_name)
{
	OM_uint32 major;

	dsyslog("Entering duplicate_name\n");
	major = gss_duplicate_name(minor_status, input_name, output_name);
	dsyslog("Leaving duplicate_name\n");
	return (major);
}
/*
 * Inquire about a SPNEGO credential.  If a real SPNEGO credential was
 * supplied, delegate to the mechglue on the wrapped credential.  If
 * GSS_C_NO_CREDENTIAL was supplied, acquire a default credential for all
 * available non-SPNEGO mechs and report on the first one, so that we never
 * recurse back into SPNEGO itself.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_cred(
			OM_uint32 *minor_status,
			gss_cred_id_t cred_handle,
			gss_name_t *name,
			OM_uint32 *lifetime,
			int *cred_usage,
			gss_OID_set *mechanisms)
{
	OM_uint32 status;
	spnego_gss_cred_id_t spcred = NULL;
	gss_cred_id_t creds = GSS_C_NO_CREDENTIAL;
	OM_uint32 tmp_minor_status;
	OM_uint32 initiator_lifetime, acceptor_lifetime;

	dsyslog("Entering inquire_cred\n");

	/*
	 * To avoid infinite recursion, if GSS_C_NO_CREDENTIAL is
	 * supplied we call gss_inquire_cred_by_mech() on the
	 * first non-SPNEGO mechanism.
	 */
	spcred = (spnego_gss_cred_id_t)cred_handle;
	if (spcred == NULL) {
		/* *mechanisms receives the list of available mechs, and
		 * creds a credential valid for those mechs. */
		status = get_available_mechs(minor_status,
				GSS_C_NO_NAME,
				GSS_C_BOTH,
				GSS_C_NO_CRED_STORE,
				&creds,
				mechanisms);
		if (status != GSS_S_COMPLETE) {
			dsyslog("Leaving inquire_cred\n");
			return (status);
		}

		/* No usable mechs at all: nothing to report on. */
		if ((*mechanisms)->count == 0) {
			gss_release_cred(&tmp_minor_status, &creds);
			gss_release_oid_set(&tmp_minor_status, mechanisms);
			dsyslog("Leaving inquire_cred\n");
			return (GSS_S_DEFECTIVE_CREDENTIAL);
		}

		assert((*mechanisms)->elements != NULL);

		/* Report name/lifetimes/usage of the first mech's cred. */
		status = gss_inquire_cred_by_mech(minor_status,
				creds,
				&(*mechanisms)->elements[0],
				name,
				&initiator_lifetime,
				&acceptor_lifetime,
				cred_usage);
		if (status != GSS_S_COMPLETE) {
			gss_release_cred(&tmp_minor_status, &creds);
			dsyslog("Leaving inquire_cred\n");
			return (status);
		}

		/* Pick whichever lifetime matches the reported usage. */
		if (lifetime != NULL)
			*lifetime = (*cred_usage == GSS_C_ACCEPT) ?
				acceptor_lifetime : initiator_lifetime;

		gss_release_cred(&tmp_minor_status, &creds);
	} else {
		/* Real SPNEGO cred: query the wrapped mechglue cred. */
		status = gss_inquire_cred(minor_status, spcred->mcred,
					  name, lifetime,
					  cred_usage, mechanisms);
	}

	dsyslog("Leaving inquire_cred\n");
	return (status);
}
/*ARGSUSED*/
/* Pass-through to the mechglue's gss_compare_name(), with tracing. */
OM_uint32 KRB5_CALLCONV
spnego_gss_compare_name(
			OM_uint32 *minor_status,
			const gss_name_t name1,
			const gss_name_t name2,
			int *name_equal)
{
	OM_uint32 ret;

	dsyslog("Entering compare_name\n");
	ret = gss_compare_name(minor_status, name1, name2, name_equal);
	dsyslog("Leaving compare_name\n");
	return (ret);
}
/*ARGSUSED*/
/* Pass-through to the mechglue's gss_display_name(), with tracing. */
OM_uint32 KRB5_CALLCONV
spnego_gss_display_name(
			OM_uint32 *minor_status,
			gss_name_t input_name,
			gss_buffer_t output_name_buffer,
			gss_OID *output_name_type)
{
	OM_uint32 ret;

	dsyslog("Entering display_name\n");
	ret = gss_display_name(minor_status, input_name,
			       output_name_buffer, output_name_type);
	dsyslog("Leaving display_name\n");
	return (ret);
}
/*ARGSUSED*/
/*
 * Report the name types SPNEGO supports.  Only our own mechanism OID (or
 * GSS_C_NULL_OID) is accepted; the result set contains the four standard
 * GSS name types.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_names_for_mech(
			OM_uint32	*minor_status,
			gss_OID		mechanism,
			gss_OID_set	*name_types)
{
	OM_uint32 major, minor;
	gss_OID supported[4];
	size_t i;

	dsyslog("Entering inquire_names_for_mech\n");
	/*
	 * We only know how to handle our own mechanism.
	 */
	if ((mechanism != GSS_C_NULL_OID) &&
	    !g_OID_equal(gss_mech_spnego, mechanism)) {
		*minor_status = 0;
		return (GSS_S_FAILURE);
	}

	major = gss_create_empty_oid_set(minor_status, name_types);
	if (major == GSS_S_COMPLETE) {
		/* Add each supported name type; stop at first failure. */
		supported[0] = (gss_OID) GSS_C_NT_USER_NAME;
		supported[1] = (gss_OID) GSS_C_NT_MACHINE_UID_NAME;
		supported[2] = (gss_OID) GSS_C_NT_STRING_UID_NAME;
		supported[3] = (gss_OID) GSS_C_NT_HOSTBASED_SERVICE;
		for (i = 0; i < 4; i++) {
			major = gss_add_oid_set_member(minor_status,
						       supported[i],
						       name_types);
			if (major != GSS_S_COMPLETE)
				break;
		}
		/* On any failure, don't hand back a partial set. */
		if (major != GSS_S_COMPLETE)
			(void) gss_release_oid_set(&minor, name_types);
	}

	dsyslog("Leaving inquire_names_for_mech\n");
	return (major);
}
/* Pure pass-through to the mechglue's gss_unwrap(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_unwrap(
		OM_uint32 *minor_status,
		gss_ctx_id_t context_handle,
		gss_buffer_t input_message_buffer,
		gss_buffer_t output_message_buffer,
		int *conf_state,
		gss_qop_t *qop_state)
{
	return (gss_unwrap(minor_status, context_handle,
			   input_message_buffer, output_message_buffer,
			   conf_state, qop_state));
}
/* Pure pass-through to the mechglue's gss_wrap(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap(
		OM_uint32 *minor_status,
		gss_ctx_id_t context_handle,
		int conf_req_flag,
		gss_qop_t qop_req,
		gss_buffer_t input_message_buffer,
		int *conf_state,
		gss_buffer_t output_message_buffer)
{
	return (gss_wrap(minor_status, context_handle, conf_req_flag,
			 qop_req, input_message_buffer, conf_state,
			 output_message_buffer));
}
/* Pure pass-through to the mechglue's gss_process_context_token(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_process_context_token(
				OM_uint32	*minor_status,
				const gss_ctx_id_t context_handle,
				const gss_buffer_t token_buffer)
{
	return (gss_process_context_token(minor_status, context_handle,
					  token_buffer));
}
/*
 * Delete a security context.  The handle may be either a SPNEGO context
 * (still negotiating) or an already-established mechanism context that was
 * handed back to the caller; the SPNEGO_MAGIC_ID field distinguishes the
 * two cases.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_delete_sec_context(
			OM_uint32 *minor_status,
			gss_ctx_id_t *context_handle,
			gss_buffer_t output_token)
{
	OM_uint32 ret = GSS_S_COMPLETE;
	spnego_gss_ctx_id_t *ctx =
		    (spnego_gss_ctx_id_t *)context_handle;

	*minor_status = 0;

	if (context_handle == NULL)
		return (GSS_S_FAILURE);

	/* Deleting a null context is a successful no-op. */
	if (*ctx == NULL)
		return (GSS_S_COMPLETE);

	/*
	 * If this is still an SPNEGO mech, release it locally.
	 */
	if ((*ctx)->magic_num == SPNEGO_MAGIC_ID) {
		/* Delete the inner mech context, then free the SPNEGO
		 * wrapper itself. */
		(void) gss_delete_sec_context(minor_status,
				    &(*ctx)->ctx_handle,
				    output_token);
		(void) release_spnego_ctx(ctx);
	} else {
		/* Established context: let the mechglue delete it. */
		ret = gss_delete_sec_context(minor_status,
				    context_handle,
				    output_token);
	}

	return (ret);
}
/* Pure pass-through to the mechglue's gss_context_time(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_context_time(
			OM_uint32	*minor_status,
			const gss_ctx_id_t context_handle,
			OM_uint32	*time_rec)
{
	return (gss_context_time(minor_status, context_handle, time_rec));
}
#ifndef LEAN_CLIENT
/* Pure pass-through to the mechglue's gss_export_sec_context(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_export_sec_context(
			    OM_uint32	  *minor_status,
			    gss_ctx_id_t *context_handle,
			    gss_buffer_t interprocess_token)
{
	return (gss_export_sec_context(minor_status, context_handle,
				       interprocess_token));
}
/* Pure pass-through to the mechglue's gss_import_sec_context(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_import_sec_context(
	OM_uint32		*minor_status,
	const gss_buffer_t	interprocess_token,
	gss_ctx_id_t		*context_handle)
{
	return (gss_import_sec_context(minor_status, interprocess_token,
				       context_handle));
}
#endif /* LEAN_CLIENT */
/* Pure pass-through to the mechglue's gss_inquire_context(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_context(
			OM_uint32	*minor_status,
			const gss_ctx_id_t context_handle,
			gss_name_t	*src_name,
			gss_name_t	*targ_name,
			OM_uint32	*lifetime_rec,
			gss_OID		*mech_type,
			OM_uint32	*ctx_flags,
			int		*locally_initiated,
			int		*opened)
{
	return (gss_inquire_context(minor_status, context_handle,
				    src_name, targ_name, lifetime_rec,
				    mech_type, ctx_flags,
				    locally_initiated, opened));
}
/* Pure pass-through to the mechglue's gss_wrap_size_limit(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap_size_limit(
	OM_uint32	*minor_status,
	const gss_ctx_id_t context_handle,
	int		conf_req_flag,
	gss_qop_t	qop_req,
	OM_uint32	req_output_size,
	OM_uint32	*max_input_size)
{
	return (gss_wrap_size_limit(minor_status, context_handle,
				    conf_req_flag, qop_req,
				    req_output_size, max_input_size));
}
/* Pure pass-through to the mechglue's gss_get_mic(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_get_mic(
		OM_uint32 *minor_status,
		const gss_ctx_id_t context_handle,
		gss_qop_t qop_req,
		const gss_buffer_t message_buffer,
		gss_buffer_t message_token)
{
	return (gss_get_mic(minor_status, context_handle, qop_req,
			    message_buffer, message_token));
}
/* Pure pass-through to the mechglue's gss_verify_mic(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_verify_mic(
		OM_uint32 *minor_status,
		const gss_ctx_id_t context_handle,
		const gss_buffer_t msg_buffer,
		const gss_buffer_t token_buffer,
		gss_qop_t *qop_state)
{
	return (gss_verify_mic(minor_status, context_handle,
			       msg_buffer, token_buffer, qop_state));
}
/* Pure pass-through to the mechglue's gss_inquire_sec_context_by_oid(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_sec_context_by_oid(
		OM_uint32 *minor_status,
		const gss_ctx_id_t context_handle,
		const gss_OID desired_object,
		gss_buffer_set_t *data_set)
{
	return (gss_inquire_sec_context_by_oid(minor_status,
					       context_handle,
					       desired_object, data_set));
}
/*
 * Unwrap the SPNEGO credential (tolerating GSS_C_NO_CREDENTIAL) and query
 * the underlying mechglue credential.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_cred_by_oid(
		OM_uint32 *minor_status,
		const gss_cred_id_t cred_handle,
		const gss_OID desired_object,
		gss_buffer_set_t *data_set)
{
	spnego_gss_cred_id_t spcred = (spnego_gss_cred_id_t)cred_handle;

	return (gss_inquire_cred_by_oid(minor_status,
					(spcred == NULL) ?
						GSS_C_NO_CREDENTIAL :
						spcred->mcred,
					desired_object, data_set));
}
/*
 * Set an option on the underlying mechglue credential.  If the caller
 * passed GSS_C_NO_CREDENTIAL and the mechanism allocated a fresh credential
 * as a side effect, wrap that new credential in a SPNEGO credential record
 * before returning it through *cred_handle.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_set_cred_option(
		OM_uint32 *minor_status,
		gss_cred_id_t *cred_handle,
		const gss_OID desired_object,
		const gss_buffer_t value)
{
	OM_uint32 ret;
	OM_uint32 tmp_minor_status;
	spnego_gss_cred_id_t spcred = (spnego_gss_cred_id_t)*cred_handle;
	gss_cred_id_t mcred;

	/* Unwrap to the mechglue cred; NULL means "no credential". */
	mcred = (spcred == NULL) ? GSS_C_NO_CREDENTIAL : spcred->mcred;
	ret = gss_set_cred_option(minor_status,
				  &mcred,
				  desired_object,
				  value);
	if (ret == GSS_S_COMPLETE && spcred == NULL) {
		/*
		 * If the mechanism allocated a new credential handle, then
		 * we need to wrap it up in an SPNEGO credential handle.
		 */
		spcred = malloc(sizeof(spnego_gss_cred_id_rec));
		if (spcred == NULL) {
			/* Don't leak the mech's new credential on OOM. */
			gss_release_cred(&tmp_minor_status, &mcred);
			*minor_status = ENOMEM;
			return (GSS_S_FAILURE);
		}
		/* SPNEGO record takes ownership of mcred; no mech
		 * restriction until gss_set_neg_mechs is called. */
		spcred->mcred = mcred;
		spcred->neg_mechs = GSS_C_NULL_OID_SET;
		*cred_handle = (gss_cred_id_t)spcred;
	}

	return (ret);
}
/* Pure pass-through to the mechglue's gss_set_sec_context_option(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_set_sec_context_option(
		OM_uint32 *minor_status,
		gss_ctx_id_t *context_handle,
		const gss_OID desired_object,
		const gss_buffer_t value)
{
	return (gss_set_sec_context_option(minor_status, context_handle,
					   desired_object, value));
}
/* Pure pass-through to the mechglue's gss_wrap_aead(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap_aead(OM_uint32 *minor_status,
		     gss_ctx_id_t context_handle,
		     int conf_req_flag,
		     gss_qop_t qop_req,
		     gss_buffer_t input_assoc_buffer,
		     gss_buffer_t input_payload_buffer,
		     int *conf_state,
		     gss_buffer_t output_message_buffer)
{
	return (gss_wrap_aead(minor_status, context_handle, conf_req_flag,
			      qop_req, input_assoc_buffer,
			      input_payload_buffer, conf_state,
			      output_message_buffer));
}
/* Pure pass-through to the mechglue's gss_unwrap_aead(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_unwrap_aead(OM_uint32 *minor_status,
		       gss_ctx_id_t context_handle,
		       gss_buffer_t input_message_buffer,
		       gss_buffer_t input_assoc_buffer,
		       gss_buffer_t output_payload_buffer,
		       int *conf_state,
		       gss_qop_t *qop_state)
{
	return (gss_unwrap_aead(minor_status, context_handle,
				input_message_buffer, input_assoc_buffer,
				output_payload_buffer, conf_state,
				qop_state));
}
/* Pure pass-through to the mechglue's gss_wrap_iov(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap_iov(OM_uint32 *minor_status,
		    gss_ctx_id_t context_handle,
		    int conf_req_flag,
		    gss_qop_t qop_req,
		    int *conf_state,
		    gss_iov_buffer_desc *iov,
		    int iov_count)
{
	return (gss_wrap_iov(minor_status, context_handle, conf_req_flag,
			     qop_req, conf_state, iov, iov_count));
}
/* Pure pass-through to the mechglue's gss_unwrap_iov(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_unwrap_iov(OM_uint32 *minor_status,
		      gss_ctx_id_t context_handle,
		      int *conf_state,
		      gss_qop_t *qop_state,
		      gss_iov_buffer_desc *iov,
		      int iov_count)
{
	return (gss_unwrap_iov(minor_status, context_handle, conf_state,
			       qop_state, iov, iov_count));
}
/* Pure pass-through to the mechglue's gss_wrap_iov_length(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_wrap_iov_length(OM_uint32 *minor_status,
			   gss_ctx_id_t context_handle,
			   int conf_req_flag,
			   gss_qop_t qop_req,
			   int *conf_state,
			   gss_iov_buffer_desc *iov,
			   int iov_count)
{
	return (gss_wrap_iov_length(minor_status, context_handle,
				    conf_req_flag, qop_req, conf_state,
				    iov, iov_count));
}
/* Pure pass-through to the mechglue's gss_complete_auth_token(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_complete_auth_token(
		OM_uint32 *minor_status,
		const gss_ctx_id_t context_handle,
		gss_buffer_t input_message_buffer)
{
	return (gss_complete_auth_token(minor_status, context_handle,
					input_message_buffer));
}
/*
 * Acquire an impersonated credential via the underlying mechanisms and wrap
 * the result in a SPNEGO credential record.  If no desired mechs are given,
 * the impersonator credential's own mech list is used.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_acquire_cred_impersonate_name(OM_uint32 *minor_status,
					 const gss_cred_id_t impersonator_cred_handle,
					 const gss_name_t desired_name,
					 OM_uint32 time_req,
					 gss_OID_set desired_mechs,
					 gss_cred_usage_t cred_usage,
					 gss_cred_id_t *output_cred_handle,
					 gss_OID_set *actual_mechs,
					 OM_uint32 *time_rec)
{
	OM_uint32 status, tmpmin;
	gss_OID_set amechs = GSS_C_NULL_OID_SET;
	spnego_gss_cred_id_t imp_spcred = NULL, out_spcred = NULL;
	gss_cred_id_t imp_mcred, out_mcred = GSS_C_NO_CREDENTIAL;

	dsyslog("Entering spnego_gss_acquire_cred_impersonate_name\n");

	if (actual_mechs)
		*actual_mechs = NULL;

	if (time_rec)
		*time_rec = 0;

	/* Unwrap the impersonator's SPNEGO cred to the mechglue cred. */
	imp_spcred = (spnego_gss_cred_id_t)impersonator_cred_handle;
	imp_mcred = imp_spcred ? imp_spcred->mcred : GSS_C_NO_CREDENTIAL;
	if (desired_mechs == GSS_C_NO_OID_SET) {
		/* Default to the mechs of the impersonator credential. */
		status = gss_inquire_cred(minor_status, imp_mcred, NULL, NULL,
					  NULL, &amechs);
		if (status != GSS_S_COMPLETE)
			return status;
		desired_mechs = amechs;
	}

	status = gss_acquire_cred_impersonate_name(minor_status, imp_mcred,
						   desired_name, time_req,
						   desired_mechs, cred_usage,
						   &out_mcred, actual_mechs,
						   time_rec);

	/* Use a scratch minor here so a successful release cannot clobber
	 * the minor status of the acquire call above (was: minor_status). */
	if (amechs != GSS_C_NULL_OID_SET)
		(void) gss_release_oid_set(&tmpmin, &amechs);

	/* Bug fix: the acquire status was previously ignored, so on failure
	 * an uninitialized/invalid credential handle was wrapped up and
	 * returned to the caller. */
	if (status != GSS_S_COMPLETE)
		return (status);

	/* Wrap the mechglue credential in a SPNEGO credential record. */
	out_spcred = malloc(sizeof(spnego_gss_cred_id_rec));
	if (out_spcred == NULL) {
		gss_release_cred(&tmpmin, &out_mcred);
		*minor_status = ENOMEM;
		return (GSS_S_FAILURE);
	}
	out_spcred->mcred = out_mcred;
	out_spcred->neg_mechs = GSS_C_NULL_OID_SET;
	*output_cred_handle = (gss_cred_id_t)out_spcred;

	dsyslog("Leaving spnego_gss_acquire_cred_impersonate_name\n");
	return (status);
}
/*
 * Acquire a password-based credential for all available non-SPNEGO mechs
 * and wrap it in a SPNEGO credential record.  On success, *output_cred_handle
 * owns the new mechglue credential.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_acquire_cred_with_password(OM_uint32 *minor_status,
				      const gss_name_t desired_name,
				      const gss_buffer_t password,
				      OM_uint32 time_req,
				      const gss_OID_set desired_mechs,
				      gss_cred_usage_t cred_usage,
				      gss_cred_id_t *output_cred_handle,
				      gss_OID_set *actual_mechs,
				      OM_uint32 *time_rec)
{
	OM_uint32 status, tmpmin;
	gss_OID_set amechs = GSS_C_NULL_OID_SET;
	gss_cred_id_t mcred = NULL;
	spnego_gss_cred_id_t spcred = NULL;

	dsyslog("Entering spnego_gss_acquire_cred_with_password\n");

	if (actual_mechs)
		*actual_mechs = NULL;

	if (time_rec)
		*time_rec = 0;

	/* NOTE: desired_mechs is not consulted; the mech list always comes
	 * from get_available_mechs (all non-SPNEGO mechs). */
	status = get_available_mechs(minor_status, desired_name,
				     cred_usage, GSS_C_NO_CRED_STORE,
				     NULL, &amechs);
	if (status != GSS_S_COMPLETE)
		goto cleanup;

	status = gss_acquire_cred_with_password(minor_status, desired_name,
						password, time_req, amechs,
						cred_usage, &mcred,
						actual_mechs, time_rec);
	if (status != GSS_S_COMPLETE)
		goto cleanup;

	/* Wrap the mechglue cred in a SPNEGO credential record. */
	spcred = malloc(sizeof(spnego_gss_cred_id_rec));
	if (spcred == NULL) {
		*minor_status = ENOMEM;
		status = GSS_S_FAILURE;
		goto cleanup;
	}
	spcred->neg_mechs = GSS_C_NULL_OID_SET;
	spcred->mcred = mcred;
	/* Ownership transferred; stop the cleanup path releasing it. */
	mcred = GSS_C_NO_CREDENTIAL;
	*output_cred_handle = (gss_cred_id_t)spcred;

cleanup:

	/* Safe on every path: both helpers accept empty/null handles. */
	(void) gss_release_oid_set(&tmpmin, &amechs);
	(void) gss_release_cred(&tmpmin, &mcred);

	dsyslog("Leaving spnego_gss_acquire_cred_with_password\n");
	return (status);
}
/* Pure pass-through to the mechglue's gss_display_name_ext(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_display_name_ext(OM_uint32 *minor_status,
			    gss_name_t name,
			    gss_OID display_as_name_type,
			    gss_buffer_t display_name)
{
	return (gss_display_name_ext(minor_status, name,
				     display_as_name_type, display_name));
}
/* Pure pass-through to the mechglue's gss_inquire_name(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_name(OM_uint32 *minor_status,
			gss_name_t name,
			int *name_is_MN,
			gss_OID *MN_mech,
			gss_buffer_set_t *attrs)
{
	return (gss_inquire_name(minor_status, name, name_is_MN,
				 MN_mech, attrs));
}
/* Pure pass-through to the mechglue's gss_get_name_attribute(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_get_name_attribute(OM_uint32 *minor_status,
			      gss_name_t name,
			      gss_buffer_t attr,
			      int *authenticated,
			      int *complete,
			      gss_buffer_t value,
			      gss_buffer_t display_value,
			      int *more)
{
	return (gss_get_name_attribute(minor_status, name, attr,
				       authenticated, complete, value,
				       display_value, more));
}
/* Pure pass-through to the mechglue's gss_set_name_attribute(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_set_name_attribute(OM_uint32 *minor_status,
			      gss_name_t name,
			      int complete,
			      gss_buffer_t attr,
			      gss_buffer_t value)
{
	return (gss_set_name_attribute(minor_status, name, complete,
				       attr, value));
}
/* Pure pass-through to the mechglue's gss_delete_name_attribute(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_delete_name_attribute(OM_uint32 *minor_status,
				 gss_name_t name,
				 gss_buffer_t attr)
{
	return (gss_delete_name_attribute(minor_status, name, attr));
}
/* Pure pass-through to the mechglue's gss_export_name_composite(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_export_name_composite(OM_uint32 *minor_status,
				 gss_name_t name,
				 gss_buffer_t exp_composite_name)
{
	return (gss_export_name_composite(minor_status, name,
					  exp_composite_name));
}
/* Pure pass-through to the mechglue's gss_map_name_to_any(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_map_name_to_any(OM_uint32 *minor_status,
			   gss_name_t name,
			   int authenticated,
			   gss_buffer_t type_id,
			   gss_any_t *output)
{
	return (gss_map_name_to_any(minor_status, name, authenticated,
				    type_id, output));
}
/* Pure pass-through to the mechglue's gss_release_any_name_mapping(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_release_any_name_mapping(OM_uint32 *minor_status,
				    gss_name_t name,
				    gss_buffer_t type_id,
				    gss_any_t *input)
{
	return (gss_release_any_name_mapping(minor_status, name,
					     type_id, input));
}
/* Pure pass-through to the mechglue's gss_pseudo_random(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_pseudo_random(OM_uint32 *minor_status,
			 gss_ctx_id_t context,
			 int prf_key,
			 const gss_buffer_t prf_in,
			 ssize_t desired_output_len,
			 gss_buffer_t prf_out)
{
	return (gss_pseudo_random(minor_status, context, prf_key,
				  prf_in, desired_output_len, prf_out));
}
/*
 * Restrict the set of mechanisms SPNEGO will negotiate for this credential.
 * The supplied mech_list is copied into the SPNEGO credential record, where
 * the negotiation logic consults it (see get_negotiable_mechs()).
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_set_neg_mechs(OM_uint32 *minor_status,
			 gss_cred_id_t cred_handle,
			 const gss_OID_set mech_list)
{
	OM_uint32 ret;
	spnego_gss_cred_id_t spcred = (spnego_gss_cred_id_t)cred_handle;

	/* Bug fix: a null credential handle previously dereferenced NULL.
	 * There is no SPNEGO record to store the list in for the default
	 * credential, so report the missing credential instead. */
	if (spcred == NULL) {
		*minor_status = 0;
		return (GSS_S_NO_CRED);
	}

	/* Store mech_list in spcred for use in negotiation logic. */
	gss_release_oid_set(minor_status, &spcred->neg_mechs);
	ret = generic_gss_copy_oid_set(minor_status, mech_list,
				       &spcred->neg_mechs);
	return (ret);
}
#define SPNEGO_SASL_NAME "SPNEGO"
#define SPNEGO_SASL_NAME_LEN (sizeof(SPNEGO_SASL_NAME) - 1)
/*
 * Map the SASL mechanism name "SPNEGO" to our mechanism OID.
 * Returns GSS_S_BAD_MECH for any other SASL name.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_mech_for_saslname(OM_uint32 *minor_status,
				     const gss_buffer_t sasl_mech_name,
				     gss_OID *mech_type)
{
	/* Bug fix: *minor_status was previously left uninitialized on every
	 * path; callers reading it on GSS_S_BAD_MECH saw garbage. */
	*minor_status = 0;

	if (sasl_mech_name->length == SPNEGO_SASL_NAME_LEN &&
	    memcmp(sasl_mech_name->value, SPNEGO_SASL_NAME,
		   SPNEGO_SASL_NAME_LEN) == 0) {
		if (mech_type != NULL)
			*mech_type = (gss_OID)gss_mech_spnego;
		return (GSS_S_COMPLETE);
	}

	return (GSS_S_BAD_MECH);
}
/*
 * Report the SASL name and human-readable name/description for the SPNEGO
 * mechanism OID.  Only gss_mech_spnego is accepted.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_saslname_for_mech(OM_uint32 *minor_status,
				     const gss_OID desired_mech,
				     gss_buffer_t sasl_mech_name,
				     gss_buffer_t mech_name,
				     gss_buffer_t mech_description)
{
	*minor_status = 0;

	if (!g_OID_equal(desired_mech, gss_mech_spnego))
		return (GSS_S_BAD_MECH);

	/* NOTE(review): if a later g_make_string_buffer fails, earlier
	 * successfully-filled buffers are not released here — presumably the
	 * caller releases them on failure; confirm against mechglue usage. */
	if (!g_make_string_buffer(SPNEGO_SASL_NAME, sasl_mech_name) ||
	    !g_make_string_buffer("spnego", mech_name) ||
	    !g_make_string_buffer("Simple and Protected GSS-API "
				  "Negotiation Mechanism", mech_description))
		goto fail;

	return (GSS_S_COMPLETE);

fail:
	*minor_status = ENOMEM;
	return (GSS_S_FAILURE);
}
/*
 * Report RFC 5587 mechanism attributes for SPNEGO: it is a negotiation
 * mechanism whose initial token is framed.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_inquire_attrs_for_mech(OM_uint32 *minor_status,
				  gss_const_OID mech,
				  gss_OID_set *mech_attrs,
				  gss_OID_set *known_mech_attrs)
{
	OM_uint32 major, tmpMinor;

	/* known_mech_attrs is handled by mechglue */
	*minor_status = 0;

	/* Caller may pass NULL if only known_mech_attrs is wanted. */
	if (mech_attrs == NULL)
	    return (GSS_S_COMPLETE);

	major = gss_create_empty_oid_set(minor_status, mech_attrs);
	if (GSS_ERROR(major))
		goto cleanup;

/* Helper: add one attribute OID, bailing out on error. */
#define MA_SUPPORTED(ma)    do {					\
		major = gss_add_oid_set_member(minor_status,		\
					       (gss_OID)ma, mech_attrs); \
		if (GSS_ERROR(major))					\
			goto cleanup;					\
	} while (0)

	MA_SUPPORTED(GSS_C_MA_MECH_NEGO);
	MA_SUPPORTED(GSS_C_MA_ITOK_FRAMED);

cleanup:
	/* Don't return a partially-built set on error. */
	if (GSS_ERROR(major))
		gss_release_oid_set(&tmpMinor, mech_attrs);

	return (major);
}
/*
 * Export the underlying mechglue credential wrapped by a SPNEGO credential.
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_export_cred(OM_uint32 *minor_status,
		       gss_cred_id_t cred_handle,
		       gss_buffer_t token)
{
	spnego_gss_cred_id_t spcred = (spnego_gss_cred_id_t)cred_handle;
	gss_cred_id_t mcred;

	/* Bug fix / consistency: tolerate GSS_C_NO_CREDENTIAL the same way
	 * spnego_gss_inquire_cred_by_oid does, instead of dereferencing
	 * NULL. */
	mcred = (spcred == NULL) ? GSS_C_NO_CREDENTIAL : spcred->mcred;
	return (gss_export_cred(minor_status, mcred, token));
}
/*
 * Import a mechglue credential from a token and wrap it in a fresh SPNEGO
 * credential record (no negotiation restriction).
 */
OM_uint32 KRB5_CALLCONV
spnego_gss_import_cred(OM_uint32 *minor_status,
		       gss_buffer_t token,
		       gss_cred_id_t *cred_handle)
{
	OM_uint32 major;
	gss_cred_id_t mcred;
	spnego_gss_cred_id_t spcred;

	major = gss_import_cred(minor_status, token, &mcred);
	if (GSS_ERROR(major))
		return (major);

	spcred = malloc(sizeof(*spcred));
	if (spcred == NULL) {
		/* Don't leak the imported mechglue cred on OOM. */
		gss_release_cred(minor_status, &mcred);
		*minor_status = ENOMEM;
		return (GSS_S_FAILURE);
	}
	spcred->mcred = mcred;
	spcred->neg_mechs = GSS_C_NULL_OID_SET;
	*cred_handle = (gss_cred_id_t)spcred;
	return (major);
}
/* Pure pass-through to the mechglue's gss_get_mic_iov(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
		       gss_qop_t qop_req, gss_iov_buffer_desc *iov,
		       int iov_count)
{
	return (gss_get_mic_iov(minor_status, context_handle, qop_req,
				iov, iov_count));
}
/* Pure pass-through to the mechglue's gss_verify_mic_iov(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_verify_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
			  gss_qop_t *qop_state, gss_iov_buffer_desc *iov,
			  int iov_count)
{
	return (gss_verify_mic_iov(minor_status, context_handle, qop_state,
				   iov, iov_count));
}
/* Pure pass-through to the mechglue's gss_get_mic_iov_length(). */
OM_uint32 KRB5_CALLCONV
spnego_gss_get_mic_iov_length(OM_uint32 *minor_status,
			      gss_ctx_id_t context_handle, gss_qop_t qop_req,
			      gss_iov_buffer_desc *iov, int iov_count)
{
	return (gss_get_mic_iov_length(minor_status, context_handle, qop_req,
				       iov, iov_count));
}
/*
 * We will release everything but the ctx_handle so that it
 * can be passed back to init/accept context. This routine should
 * not be called until after the ctx_handle memory is assigned to
 * the supplied context handle from init/accept context.
 */
static void
release_spnego_ctx(spnego_gss_ctx_id_t *ctx)
{
	spnego_gss_ctx_id_t sc = *ctx;
	OM_uint32 tmpmin;

	if (sc == NULL)
		return;

	(void) gss_release_buffer(&tmpmin, &sc->DER_mechTypes);
	(void) gss_release_oid_set(&tmpmin, &sc->mech_set);
	(void) gss_release_name(&tmpmin, &sc->internal_name);

	/* free(NULL) is a no-op, so no guard is needed. */
	free(sc->optionStr);
	sc->optionStr = NULL;

	free(sc);
	*ctx = NULL;
}
/*
 * Can't use gss_indicate_mechs by itself to get available mechs for
 * SPNEGO because it will also return the SPNEGO mech and we do not
 * want to consider SPNEGO as an available security mech for
 * negotiation. For this reason, get_available_mechs will return
 * all available mechs except SPNEGO.
 *
 * If a ptr to a creds list is given, this function will attempt
 * to acquire creds for the creds given and trim the list of
 * returned mechanisms to only those for which creds are valid.
 *
 */
static OM_uint32
get_available_mechs(OM_uint32 *minor_status,
	gss_name_t name, gss_cred_usage_t usage,
	gss_const_key_value_set_t cred_store,
	gss_cred_id_t *creds, gss_OID_set *rmechs)
{
	unsigned int	i;
	int		found = 0;
	OM_uint32 major_status = GSS_S_COMPLETE, tmpmin;
	gss_OID_set mechs, goodmechs;

	major_status = gss_indicate_mechs(minor_status, &mechs);

	if (major_status != GSS_S_COMPLETE) {
		return (major_status);
	}

	major_status = gss_create_empty_oid_set(minor_status, rmechs);

	if (major_status != GSS_S_COMPLETE) {
		(void) gss_release_oid_set(minor_status, &mechs);
		return (major_status);
	}

	/* Copy every indicated mech except SPNEGO itself into *rmechs. */
	for (i = 0; i < mechs->count && major_status == GSS_S_COMPLETE; i++) {
		if ((mechs->elements[i].length
		    != spnego_mechanism.mech_type.length) ||
		    memcmp(mechs->elements[i].elements,
			spnego_mechanism.mech_type.elements,
			spnego_mechanism.mech_type.length)) {

			major_status = gss_add_oid_set_member(minor_status,
							      &mechs->elements[i],
							      rmechs);
			if (major_status == GSS_S_COMPLETE)
				found++;
		}
	}

	/*
	 * If the caller wanted a list of creds returned,
	 * trim the list of mechanisms down to only those
	 * for which the creds are valid.
	 */
	if (found > 0 && major_status == GSS_S_COMPLETE && creds != NULL) {
		major_status = gss_acquire_cred_from(minor_status, name,
						     GSS_C_INDEFINITE,
						     *rmechs, usage,
						     cred_store, creds,
						     &goodmechs, NULL);

		/*
		 * Drop the old list in favor of the new
		 * "trimmed" list.
		 */
		(void) gss_release_oid_set(&tmpmin, rmechs);
		if (major_status == GSS_S_COMPLETE) {
			(void) gssint_copy_oid_set(&tmpmin,
					goodmechs, rmechs);
			(void) gss_release_oid_set(&tmpmin, &goodmechs);
		}
	}

	(void) gss_release_oid_set(&tmpmin, &mechs);
	/* No usable mechs (or an error): report a SPNEGO-specific minor. */
	if (found == 0 || major_status != GSS_S_COMPLETE) {
		*minor_status = ERR_SPNEGO_NO_MECHS_AVAILABLE;
		map_errcode(minor_status);
		if (major_status == GSS_S_COMPLETE)
			major_status = GSS_S_FAILURE;
	}

	return (major_status);
}
/*
 * Return a list of mechanisms we are willing to negotiate for a credential,
 * taking into account the mech set provided with gss_set_neg_mechs if it
 * exists.
 */
static OM_uint32
get_negotiable_mechs(OM_uint32 *minor_status, spnego_gss_cred_id_t spcred,
		     gss_cred_usage_t usage, gss_OID_set *rmechs)
{
	OM_uint32 ret, tmpmin;
	gss_cred_id_t creds = GSS_C_NO_CREDENTIAL, *credptr;
	gss_OID_set cred_mechs = GSS_C_NULL_OID_SET;
	gss_OID_set intersect_mechs = GSS_C_NULL_OID_SET;
	unsigned int i;
	int present;

	if (spcred == NULL) {
		/*
		 * The default credentials were supplied.  Return a list of all
		 * available mechs except SPNEGO.  When initiating, trim this
		 * list to mechs we can acquire credentials for.
		 */
		credptr = (usage == GSS_C_INITIATE) ? &creds : NULL;
		ret = get_available_mechs(minor_status, GSS_C_NO_NAME, usage,
					  GSS_C_NO_CRED_STORE, credptr,
					  rmechs);
		/* The acquired creds were only needed for trimming. */
		gss_release_cred(&tmpmin, &creds);
		return (ret);
	}

	/* Get the list of mechs in the mechglue cred. */
	ret = gss_inquire_cred(minor_status, spcred->mcred, NULL, NULL, NULL,
			       &cred_mechs);
	if (ret != GSS_S_COMPLETE)
		return (ret);

	if (spcred->neg_mechs == GSS_C_NULL_OID_SET) {
		/* gss_set_neg_mechs was never called; return cred_mechs.
		 * Ownership of cred_mechs passes to the caller. */
		*rmechs = cred_mechs;
		*minor_status = 0;
		return (GSS_S_COMPLETE);
	}

	/* Compute the intersection of cred_mechs and spcred->neg_mechs,
	 * preserving the order in spcred->neg_mechs. */
	ret = gss_create_empty_oid_set(minor_status, &intersect_mechs);
	if (ret != GSS_S_COMPLETE) {
		gss_release_oid_set(&tmpmin, &cred_mechs);
		return (ret);
	}

	for (i = 0; i < spcred->neg_mechs->count; i++) {
		gss_test_oid_set_member(&tmpmin,
					&spcred->neg_mechs->elements[i],
					cred_mechs, &present);
		if (!present)
			continue;
		ret = gss_add_oid_set_member(minor_status,
					     &spcred->neg_mechs->elements[i],
					     &intersect_mechs);
		if (ret != GSS_S_COMPLETE)
			break;
	}

	gss_release_oid_set(&tmpmin, &cred_mechs);
	/* An empty intersection means nothing can be negotiated. */
	if (intersect_mechs->count == 0 || ret != GSS_S_COMPLETE) {
		gss_release_oid_set(&tmpmin, &intersect_mechs);
		*minor_status = ERR_SPNEGO_NO_MECHS_AVAILABLE;
		map_errcode(minor_status);
		return (GSS_S_FAILURE);
	}

	*rmechs = intersect_mechs;
	*minor_status = 0;
	return (GSS_S_COMPLETE);
}
/* following are token creation and reading routines */
/*
 * If buff_in is not pointing to a MECH_OID, then return NULL and do not
 * advance the buffer, otherwise, decode the mech_oid from the buffer and
 * place in gss_OID.  On success the buffer pointer is advanced past the
 * encoded OID.  `length' is the number of valid bytes at *buff_in.
 */
static gss_OID
get_mech_oid(OM_uint32 *minor_status, unsigned char **buff_in, size_t length)
{
	OM_uint32	status;
	gss_OID_desc 	toid;
	gss_OID		mech_out = NULL;
	unsigned char		*start, *end;

	/* Bug fix: the tag byte AND the length octet are read below, so at
	 * least two bytes must remain; the old `length < 1' check allowed a
	 * one-byte out-of-bounds read of the length octet. */
	if (length < 2 || **buff_in != MECH_OID)
		return (NULL);

	start = *buff_in;
	end = start + length;

	(*buff_in)++;
	toid.length = *(*buff_in)++;

	/* The OID body must also fit inside the remaining bytes. */
	if ((*buff_in + toid.length) > end)
		return (NULL);

	toid.elements = *buff_in;
	*buff_in += toid.length;

	status = generic_gss_copy_oid(minor_status, &toid, &mech_out);

	if (status != GSS_S_COMPLETE) {
		map_errcode(minor_status);
		mech_out = NULL;
	}

	return (mech_out);
}
/*
 * DER-encode the given mechanism OID (tag, one-byte length, contents) into
 * *buf_out, advancing the output pointer.  Returns -1 if fewer than
 * mech->length + 2 bytes are available.
 */
static int
put_mech_oid(unsigned char **buf_out, gss_OID_const mech, unsigned int buflen)
{
	unsigned char *p;

	if (buflen < mech->length + 2)
		return (-1);

	p = *buf_out;
	*p++ = MECH_OID;
	*p++ = (unsigned char) mech->length;
	memcpy(p, mech->elements, mech->length);
	*buf_out = p + mech->length;
	return (0);
}
/*
 * verify that buff_in points to an octet string, if it does not,
 * return NULL and don't advance the pointer. If it is an octet string
 * decode buff_in into a gss_buffer_t and return it, advancing the
 * buffer pointer.  The returned buffer and its value are heap-allocated;
 * the caller owns both.
 */
static gss_buffer_t
get_input_token(unsigned char **buff_in, unsigned int buff_length)
{
	gss_buffer_t input_token;
	unsigned int len;

	/* Checks the OCTET_STRING tag and that len fits in buff_length,
	 * advancing *buff_in past tag and length on success. */
	if (g_get_tag_and_length(buff_in, OCTET_STRING, buff_length, &len) < 0)
		return (NULL);

	input_token = (gss_buffer_t)malloc(sizeof (gss_buffer_desc));
	if (input_token == NULL)
		return (NULL);

	input_token->length = len;
	if (input_token->length > 0) {
		input_token->value = gssalloc_malloc(input_token->length);
		if (input_token->value == NULL) {
			free(input_token);
			return (NULL);
		}

		memcpy(input_token->value, *buff_in, input_token->length);
	} else {
		/* Zero-length octet string: valid, empty token. */
		input_token->value = NULL;
	}
	*buff_in += input_token->length;
	return (input_token);
}
/*
 * verify that the input token length is not 0. If it is, just return.
 * If the token length is greater than 0, der encode as an octet string
 * and place in buf_out, advancing buf_out.
 */
static int
put_input_token(unsigned char **buf_out, gss_buffer_t input_token,
		unsigned int buflen)
{
	int ret;

	/* if token length is 0, we do not want to send */
	if (input_token->length == 0)
		return (0);

	if (input_token->length > buflen)
		return (-1);

	*(*buf_out)++ = OCTET_STRING;
	/* NOTE(review): the third argument (max size for the DER length
	 * encoding) is input_token->length rather than the remaining buffer
	 * space — presumably adequate in practice; confirm against
	 * gssint_put_der_length's contract. */
	if ((ret = gssint_put_der_length(input_token->length, buf_out,
			    input_token->length)))
		return (ret);
	TWRITE_STR(*buf_out, input_token->value, input_token->length);
	return (0);
}
/*
 * verify that buff_in points to a sequence of der encoding. The mech
 * set is the only sequence of encoded object in the token, so if it is
 * a sequence of encoding, decode the mechset into a gss_OID_set and
 * return it, advancing the buffer pointer.  Returns NULL on any
 * decoding or allocation failure.
 */
static gss_OID_set
get_mech_set(OM_uint32 *minor_status, unsigned char **buff_in,
	     unsigned int buff_length)
{
	gss_OID_set returned_mechSet;
	OM_uint32 major_status;
	int length;
	unsigned int bytes;
	OM_uint32 set_length;
	unsigned char		*start;
	int i;

	/* NOTE(review): no buff_length >= 1 guard before this read —
	 * presumably callers guarantee a non-empty buffer; verify. */
	if (**buff_in != SEQUENCE_OF)
		return (NULL);

	start = *buff_in;
	(*buff_in)++;

	/* Decode the SEQUENCE length and make sure it fits in the buffer. */
	length = gssint_get_der_length(buff_in, buff_length, &bytes);
	if (length < 0 || buff_length - bytes < (unsigned int)length)
		return NULL;

	major_status = gss_create_empty_oid_set(minor_status,
						&returned_mechSet);
	if (major_status != GSS_S_COMPLETE)
		return (NULL);

	/* Walk the OIDs until the declared sequence length is consumed or
	 * a malformed element is hit (get_mech_oid returns NULL). */
	for (set_length = 0, i = 0; set_length < (unsigned int)length; i++) {
		gss_OID_desc *temp = get_mech_oid(minor_status, buff_in,
			buff_length - (*buff_in - start));
		if (temp == NULL)
			break;

		major_status = gss_add_oid_set_member(minor_status,
						      temp, &returned_mechSet);
		if (major_status == GSS_S_COMPLETE) {
			/* 1 tag byte + 1 length byte + OID body. */
			set_length += returned_mechSet->elements[i].length +2;
			if (generic_gss_release_oid(minor_status, &temp))
				map_errcode(minor_status);
		}
	}

	return (returned_mechSet);
}
/*
 * Encode mechSet into buf as a DER SEQUENCE OF OID.  On success buf owns a
 * freshly allocated value of exactly the encoded size.  Returns -1 on
 * allocation or encoding failure (buf may then hold a partial buffer).
 */
static int
put_mech_set(gss_OID_set mechSet, gss_buffer_t buf)
{
	unsigned char *ptr;
	unsigned int i;
	unsigned int tlen, ilen;

	tlen = ilen = 0;
	/* First pass: compute the total encoded size of the inner OIDs. */
	for (i = 0; i < mechSet->count; i++) {
		/*
		 * 0x06 [DER LEN] [OID]
		 */
		ilen += 1 +
			gssint_der_length_size(mechSet->elements[i].length) +
			mechSet->elements[i].length;
	}
	/*
	 * 0x30 [DER LEN]
	 */
	tlen = 1 + gssint_der_length_size(ilen) + ilen;
	ptr = gssalloc_malloc(tlen);
	if (ptr == NULL)
		return -1;

	buf->value = ptr;
	buf->length = tlen;
/* Bytes still unwritten; note ptr advances while buf->value stays put,
 * so REMAIN must be computed against the moving cursor below. */
#define REMAIN (buf->length - ((unsigned char *)buf->value - ptr))

	*ptr++ = SEQUENCE_OF;
	if (gssint_put_der_length(ilen, &ptr, REMAIN) < 0)
		return -1;
	/* Second pass: emit each OID after the sequence header. */
	for (i = 0; i < mechSet->count; i++) {
		if (put_mech_oid(&ptr, &mechSet->elements[i], REMAIN) < 0) {
			return -1;
		}
	}
	return 0;
#undef REMAIN
}
/*
 * Verify that buff_in is pointing to a BIT_STRING with the correct
 * length and padding for the req_flags. If it is, decode req_flags
 * and return them; if the element is absent return 0 without advancing
 * the buffer; on a malformed element return GSS_S_DEFECTIVE_TOKEN.
 */
static OM_uint32
get_req_flags(unsigned char **buff_in, OM_uint32 bodysize,
	      OM_uint32 *req_flags)
{
	unsigned int len;

	/* reqFlags is optional; absence is not an error. */
	if (**buff_in != (CONTEXT | 0x01))
		return (0);

	if (g_get_tag_and_length(buff_in, (CONTEXT | 0x01),
				bodysize, &len) < 0)
		return GSS_S_DEFECTIVE_TOKEN;

	/* Bug fix: exactly four bytes (tag, length, padding, value) are
	 * consumed below; reject shorter contents up front so a truncated
	 * token cannot cause reads past the end of the buffer. */
	if (len < 4)
		return GSS_S_DEFECTIVE_TOKEN;

	if (*(*buff_in)++ != BIT_STRING)
		return GSS_S_DEFECTIVE_TOKEN;
	if (*(*buff_in)++ != BIT_STRING_LENGTH)
		return GSS_S_DEFECTIVE_TOKEN;
	if (*(*buff_in)++ != BIT_STRING_PADDING)
		return GSS_S_DEFECTIVE_TOKEN;
	*req_flags = (OM_uint32) (*(*buff_in)++ >> 1);
	return (0);
}
/*
 * Parse a SPNEGO NegTokenInit from buf.  On success, fills in:
 *   der_mechSet  - the raw DER encoding of the mechType list (caller frees)
 *   mechSet      - the decoded mechType OID set (caller frees)
 *   req_flags    - the optional reqFlags bits (0 if absent)
 *   mechtok      - the optional mechToken (NULL if absent; caller frees)
 *   mechListMIC  - the optional mechListMIC (NULL if absent; caller frees)
 * Returns GSS_S_COMPLETE or a failure/defective-token code.
 */
static OM_uint32
get_negTokenInit(OM_uint32 *minor_status,
		 gss_buffer_t buf,
		 gss_buffer_t der_mechSet,
		 gss_OID_set *mechSet,
		 OM_uint32 *req_flags,
		 gss_buffer_t *mechtok,
		 gss_buffer_t *mechListMIC)
{
	OM_uint32 err;
	unsigned char *ptr, *bufstart;
	unsigned int len;
	gss_buffer_desc tmpbuf;

	/* Initialize all outputs so the caller can clean up on any exit. */
	*minor_status = 0;
	der_mechSet->length = 0;
	der_mechSet->value = NULL;
	*mechSet = GSS_C_NO_OID_SET;
	*req_flags = 0;
	*mechtok = *mechListMIC = GSS_C_NO_BUFFER;

	ptr = bufstart = buf->value;
	/* The DER length routines take int; refuse anything bigger. */
	if ((buf->length - (ptr - bufstart)) > INT_MAX)
		return GSS_S_FAILURE;
#define REMAIN (buf->length - (ptr - bufstart))

	/* Strip and check the RFC 2743 mechanism-independent framing. */
	err = g_verify_token_header(gss_mech_spnego,
				    &len, &ptr, 0, REMAIN);
	if (err) {
		*minor_status = err;
		map_errcode(minor_status);
		return GSS_S_FAILURE;
	}

	/* Verify the NegTokenInit choice and sequence headers. */
	*minor_status = g_verify_neg_token_init(&ptr, REMAIN);
	if (*minor_status) {
		map_errcode(minor_status);
		return GSS_S_FAILURE;
	}

	/* alias into input_token */
	tmpbuf.value = ptr;
	tmpbuf.length = REMAIN;
	*mechSet = get_mech_set(minor_status, &ptr, REMAIN);
	if (*mechSet == NULL)
		return GSS_S_FAILURE;

	/* Also keep the raw DER bytes of the mech list; the acceptor needs
	 * them verbatim for the mechListMIC computation. */
	tmpbuf.length = ptr - (unsigned char *)tmpbuf.value;
	der_mechSet->value = gssalloc_malloc(tmpbuf.length);
	if (der_mechSet->value == NULL)
		return GSS_S_FAILURE;
	memcpy(der_mechSet->value, tmpbuf.value, tmpbuf.length);
	der_mechSet->length = tmpbuf.length;

	err = get_req_flags(&ptr, REMAIN, req_flags);
	if (err != GSS_S_COMPLETE) {
		return err;
	}
	/* Optional [2] mechToken. */
	if (g_get_tag_and_length(&ptr, (CONTEXT | 0x02),
				 REMAIN, &len) >= 0) {
		*mechtok = get_input_token(&ptr, len);
		if (*mechtok == GSS_C_NO_BUFFER) {
			return GSS_S_FAILURE;
		}
	}
	/* Optional [3] mechListMIC. */
	if (g_get_tag_and_length(&ptr, (CONTEXT | 0x03),
				 REMAIN, &len) >= 0) {
		*mechListMIC = get_input_token(&ptr, len);
		if (*mechListMIC == GSS_C_NO_BUFFER) {
			return GSS_S_FAILURE;
		}
	}
	return GSS_S_COMPLETE;
#undef REMAIN
}
/*
 * Parse a SPNEGO NegTokenResp (NegTokenTarg) message.  Every field is
 * optional; each "if (tag == ...)" block consumes one field if its
 * context tag is present.  On success, returns the negotiation state,
 * the acceptor's chosen mechanism, and copies of the responseToken and
 * mechListMIC fields.
 */
static OM_uint32
get_negTokenResp(OM_uint32 *minor_status,
		 unsigned char *buf, unsigned int buflen,
		 OM_uint32 *negState,
		 gss_OID *supportedMech,
		 gss_buffer_t *responseToken,
		 gss_buffer_t *mechListMIC)
{
	unsigned char *ptr, *bufstart;
	unsigned int len;
	int tmplen;
	unsigned int tag, bytes;

	/* Default outputs for early-failure paths. */
	*negState = ACCEPT_DEFECTIVE_TOKEN;
	*supportedMech = GSS_C_NO_OID;
	*responseToken = *mechListMIC = GSS_C_NO_BUFFER;
	ptr = bufstart = buf;
#define REMAIN (buflen - (ptr - bufstart))

	/* Outer [1] wrapper selecting the negTokenTarg CHOICE. */
	if (g_get_tag_and_length(&ptr, (CONTEXT | 0x01), REMAIN, &len) < 0)
		return GSS_S_DEFECTIVE_TOKEN;
	/* Inner SEQUENCE; its length octets are consumed and validated
	  but the individual fields are parsed below. */
	if (*ptr++ == SEQUENCE) {
		tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
		if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
			return GSS_S_DEFECTIVE_TOKEN;
	}
	/* Peek the next tag; 0 is used as a "no more fields" sentinel. */
	if (REMAIN < 1)
		tag = 0;
	else
		tag = *ptr++;

	/* Optional [0] negState: a 3-byte ENUMERATED. */
	if (tag == CONTEXT) {
		tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
		if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
			return GSS_S_DEFECTIVE_TOKEN;
		if (g_get_tag_and_length(&ptr, ENUMERATED,
					 REMAIN, &len) < 0)
			return GSS_S_DEFECTIVE_TOKEN;
		if (len != ENUMERATION_LENGTH)
			return GSS_S_DEFECTIVE_TOKEN;
		if (REMAIN < 1)
			return GSS_S_DEFECTIVE_TOKEN;
		*negState = *ptr++;
		if (REMAIN < 1)
			tag = 0;
		else
			tag = *ptr++;
	}
	/* Optional [1] supportedMech OID. */
	if (tag == (CONTEXT | 0x01)) {
		tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
		if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
			return GSS_S_DEFECTIVE_TOKEN;
		*supportedMech = get_mech_oid(minor_status, &ptr, REMAIN);
		if (*supportedMech == GSS_C_NO_OID)
			return GSS_S_DEFECTIVE_TOKEN;
		if (REMAIN < 1)
			tag = 0;
		else
			tag = *ptr++;
	}
	/* Optional [2] responseToken. */
	if (tag == (CONTEXT | 0x02)) {
		tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
		if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
			return GSS_S_DEFECTIVE_TOKEN;
		*responseToken = get_input_token(&ptr, REMAIN);
		if (*responseToken == GSS_C_NO_BUFFER)
			return GSS_S_DEFECTIVE_TOKEN;
		if (REMAIN < 1)
			tag = 0;
		else
			tag = *ptr++;
	}
	/* Optional [3] mechListMIC. */
	if (tag == (CONTEXT | 0x03)) {
		tmplen = gssint_get_der_length(&ptr, REMAIN, &bytes);
		if (tmplen < 0 || REMAIN < (unsigned int)tmplen)
			return GSS_S_DEFECTIVE_TOKEN;
		*mechListMIC = get_input_token(&ptr, REMAIN);
		if (*mechListMIC == GSS_C_NO_BUFFER)
			return GSS_S_DEFECTIVE_TOKEN;
		/* Handle Windows 2000 duplicate response token */
		if (*responseToken &&
		    ((*responseToken)->length == (*mechListMIC)->length) &&
		    !memcmp((*responseToken)->value, (*mechListMIC)->value,
			    (*responseToken)->length)) {
			OM_uint32 tmpmin;

			/* Drop the bogus duplicate; keep responseToken. */
			gss_release_buffer(&tmpmin, *mechListMIC);
			free(*mechListMIC);
			*mechListMIC = NULL;
		}
	}
	return GSS_S_COMPLETE;
#undef REMAIN
}
/*
* der encode the passed negResults as an ENUMERATED type and
* place it in buf_out, advancing the buffer.
*/
static int
put_negResult(unsigned char **buf_out, OM_uint32 negResult,
	      unsigned int buflen)
{
	unsigned char *p;

	/* Need room for the tag, the length, and one content octet. */
	if (buflen < 3)
		return (-1);
	p = *buf_out;
	p[0] = ENUMERATED;
	p[1] = ENUMERATION_LENGTH;
	p[2] = (unsigned char) negResult;
	*buf_out = p + 3;
	return (0);
}
/*
* This routine compares the recieved mechset to the mechset that
* this server can support. It looks sequentially through the mechset
* and the first one that matches what the server can support is
* chosen as the negotiated mechanism. If one is found, negResult
* is set to ACCEPT_INCOMPLETE if it's the first mech, REQUEST_MIC if
* it's not the first mech, otherwise we return NULL and negResult
* is set to REJECT. The returned pointer is an alias into
* received->elements and should not be freed.
*
* NOTE: There is currently no way to specify a preference order of
* mechanisms supported by the acceptor.
*/
static gss_OID
negotiate_mech(gss_OID_set supported, gss_OID_set received,
	       OM_uint32 *negResult)
{
	size_t r, s;
	gss_OID candidate;

	/* Walk the client's list in order; the first mutually-supported
	  mechanism wins. */
	for (r = 0; r < received->count; r++) {
		candidate = &received->elements[r];
		/* Accept wrong mechanism OID from MS clients */
		if (g_OID_equal(candidate, &gss_mech_krb5_wrong_oid))
			candidate = (gss_OID)&gss_mech_krb5_oid;
		for (s = 0; s < supported->count; s++) {
			if (!g_OID_equal(candidate, &supported->elements[s]))
				continue;
			/* A MIC is requested unless the client's first
			  (preferred) mechanism was the one chosen. */
			*negResult = (r == 0) ? ACCEPT_INCOMPLETE :
			    REQUEST_MIC;
			return &received->elements[r];
		}
	}
	*negResult = REJECT;
	return (NULL);
}
/*
* the next two routines make a token buffer suitable for
* spnego_gss_display_status. These currently take the string
* in name and place it in the token. Eventually, if
* spnego_gss_display_status returns valid error messages,
* these routines will be changes to return the error string.
*/
static spnego_token_t
make_spnego_token(const char *name)
{
return (spnego_token_t)strdup(name);
}
static gss_buffer_desc
make_err_msg(const char *name)
{
	gss_buffer_desc buffer = { 0, NULL };

	/* A NULL message yields an empty buffer, not an error. */
	if (name != NULL) {
		buffer.value = make_spnego_token(name);
		buffer.length = strlen(name) + 1; /* include the NUL */
	}
	return (buffer);
}
/*
* Create the client side spnego token passed back to gss_init_sec_context
* and eventually up to the application program and over to the server.
*
* Use DER rules, definite length method per RFC 2478
*/
static int
make_spnego_tokenInit_msg(spnego_gss_ctx_id_t spnego_ctx,
			  int negHintsCompat,
			  gss_buffer_t mechListMIC, OM_uint32 req_flags,
			  gss_buffer_t data, send_token_flag sendtoken,
			  gss_buffer_t outbuf)
{
	int ret = 0;
	unsigned int tlen, dataLen = 0;
	unsigned int negTokenInitSize = 0;
	unsigned int negTokenInitSeqSize = 0;
	unsigned int negTokenInitContSize = 0;
	unsigned int rspTokenSize = 0;
	unsigned int mechListTokenSize = 0;
	unsigned int micTokenSize = 0;
	unsigned char *t;
	unsigned char *ptr;

	if (outbuf == GSS_C_NO_BUFFER)
		return (-1);

	outbuf->length = 0;
	outbuf->value = NULL;

	/*
	 * Pass 1: compute the encoded size bottom-up so the whole token
	 * can be allocated in one shot before any bytes are written.
	 */
	/* calculate the data length */

	/*
	 * 0xa0 [DER LEN] [mechTypes]
	 */
	mechListTokenSize = 1 +
		gssint_der_length_size(spnego_ctx->DER_mechTypes.length) +
		spnego_ctx->DER_mechTypes.length;
	dataLen += mechListTokenSize;

	/*
	 * If a token from gss_init_sec_context exists,
	 * add the length of the token + the ASN.1 overhead
	 */
	if (data != NULL) {
		/*
		 * Encoded in final output as:
		 * 0xa2 [DER LEN] 0x04 [DER LEN] [DATA]
		 * -----s--------|--------s2----------
		 */
		rspTokenSize = 1 +
			gssint_der_length_size(data->length) +
			data->length;
		dataLen += 1 + gssint_der_length_size(rspTokenSize) +
			rspTokenSize;
	}

	if (mechListMIC) {
		/*
		 * Encoded in final output as:
		 * 0xa3 [DER LEN] 0x04 [DER LEN] [DATA]
		 *	--s--   -----tlen------------
		 */
		micTokenSize = 1 +
			gssint_der_length_size(mechListMIC->length) +
			mechListMIC->length;
		dataLen += 1 +
			gssint_der_length_size(micTokenSize) +
			micTokenSize;
	}

	/*
	 * Add size of DER encoding
	 * [ SEQUENCE { MechTypeList | ReqFLags | Token | mechListMIC } ]
	 * 0x30 [DER_LEN] [data]
	 *
	 */
	negTokenInitContSize = dataLen;
	negTokenInitSeqSize = 1 + gssint_der_length_size(dataLen) + dataLen;
	dataLen = negTokenInitSeqSize;

	/*
	 * negTokenInitSize indicates the bytes needed to
	 * hold the ASN.1 encoding of the entire NegTokenInit
	 * SEQUENCE.
	 * 0xa0 [DER_LEN] + data
	 *
	 */
	negTokenInitSize = 1 +
		gssint_der_length_size(negTokenInitSeqSize) +
		negTokenInitSeqSize;

	tlen = g_token_size(gss_mech_spnego, negTokenInitSize);

	t = (unsigned char *) gssalloc_malloc(tlen);

	if (t == NULL) {
		return (-1);
	}

	ptr = t;

	/*
	 * Pass 2: emit the token.  The remaining-space argument is
	 * generally tlen minus bytes already written (ptr - t).
	 * NOTE(review): the first gssint_put_der_length call below passes
	 * the full tlen rather than the remaining space -- lax but
	 * harmless here since the buffer was sized exactly; confirm
	 * against the callee's contract.
	 */
	/* create the message */
	if ((ret = g_make_token_header(gss_mech_spnego, negTokenInitSize,
				       &ptr, tlen)))
		goto errout;

	*ptr++ = CONTEXT; /* NegotiationToken identifier */
	if ((ret = gssint_put_der_length(negTokenInitSeqSize, &ptr, tlen)))
		goto errout;

	*ptr++ = SEQUENCE;
	if ((ret = gssint_put_der_length(negTokenInitContSize, &ptr,
					 tlen - (int)(ptr-t))))
		goto errout;

	*ptr++ = CONTEXT | 0x00; /* MechTypeList identifier */
	if ((ret = gssint_put_der_length(spnego_ctx->DER_mechTypes.length,
					 &ptr, tlen - (int)(ptr-t))))
		goto errout;

	/* We already encoded the MechSetList */
	(void) memcpy(ptr, spnego_ctx->DER_mechTypes.value,
		      spnego_ctx->DER_mechTypes.length);
	ptr += spnego_ctx->DER_mechTypes.length;

	/* Optional [2] mechToken from the underlying mechanism. */
	if (data != NULL) {
		*ptr++ = CONTEXT | 0x02;
		if ((ret = gssint_put_der_length(rspTokenSize,
						 &ptr, tlen - (int)(ptr - t))))
			goto errout;

		if ((ret = put_input_token(&ptr, data,
					   tlen - (int)(ptr - t))))
			goto errout;
	}

	/* Optional [3] mechListMIC, or negHints for old-style compat. */
	if (mechListMIC != GSS_C_NO_BUFFER) {
		*ptr++ = CONTEXT | 0x03;
		if ((ret = gssint_put_der_length(micTokenSize,
						 &ptr, tlen - (int)(ptr - t))))
			goto errout;

		if (negHintsCompat) {
			ret = put_neg_hints(&ptr, mechListMIC,
					    tlen - (int)(ptr - t));
			if (ret)
				goto errout;
		} else if ((ret = put_input_token(&ptr, mechListMIC,
						  tlen - (int)(ptr - t))))
			goto errout;
	}

errout:
	/* On error, free the token; outbuf then reports empty/NULL. */
	if (ret != 0) {
		if (t)
			free(t);
		t = NULL;
		tlen = 0;
	}
	outbuf->length = tlen;
	outbuf->value = (void *) t;

	return (ret);
}
/*
* create the server side spnego token passed back to
* gss_accept_sec_context and eventually up to the application program
* and over to the client.
*/
static int
make_spnego_tokenTarg_msg(OM_uint32 status, gss_OID mech_wanted,
			  gss_buffer_t data, gss_buffer_t mechListMIC,
			  send_token_flag sendtoken,
			  gss_buffer_t outbuf)
{
	unsigned int tlen = 0;
	unsigned int ret = 0;
	unsigned int NegTokenTargSize = 0;
	unsigned int NegTokenSize = 0;
	unsigned int rspTokenSize = 0;
	unsigned int micTokenSize = 0;
	unsigned int dataLen = 0;
	unsigned char *t;
	unsigned char *ptr;

	if (outbuf == GSS_C_NO_BUFFER)
		return (GSS_S_DEFECTIVE_TOKEN);
	/* The first response must name the selected mechanism. */
	if (sendtoken == INIT_TOKEN_SEND && mech_wanted == GSS_C_NO_OID)
		return (GSS_S_DEFECTIVE_TOKEN);

	outbuf->length = 0;
	outbuf->value = NULL;

	/*
	 * Pass 1: size everything bottom-up so the token can be
	 * allocated in a single buffer before any encoding starts.
	 */
	/*
	 * ASN.1 encoding of the negResult
	 * ENUMERATED type is 3 bytes
	 *  ENUMERATED TAG, Length, Value,
	 * Plus 2 bytes for the CONTEXT id and length.
	 */
	dataLen = 5;

	/*
	 * calculate data length
	 *
	 * If this is the initial token, include length of
	 * mech_type and the negotiation result fields.
	 */
	if (sendtoken == INIT_TOKEN_SEND) {
		int mechlistTokenSize;
		/*
		 * 1 byte for the CONTEXT ID(0xa0),
		 * 1 byte for the OID ID(0x06)
		 * 1 byte for OID Length field
		 * Plus the rest... (OID Length, OID value)
		 */
		mechlistTokenSize = 3 + mech_wanted->length +
			gssint_der_length_size(mech_wanted->length);

		dataLen += mechlistTokenSize;
	}

	if (data != NULL && data->length > 0) {
		/* Length of the inner token */
		rspTokenSize = 1 + gssint_der_length_size(data->length) +
			data->length;

		dataLen += rspTokenSize;

		/* Length of the outer token */
		dataLen += 1 + gssint_der_length_size(rspTokenSize);
	}

	if (mechListMIC != NULL) {
		/* Length of the inner token */
		micTokenSize = 1 + gssint_der_length_size(mechListMIC->length) +
			mechListMIC->length;

		dataLen += micTokenSize;

		/* Length of the outer token */
		dataLen += 1 + gssint_der_length_size(micTokenSize);
	}
	/*
	 * Add size of DER encoded:
	 * NegTokenTarg [ SEQUENCE ] of
	 *    NegResult[0] ENUMERATED {
	 *	accept_completed(0),
	 *	accept_incomplete(1),
	 *	reject(2) }
	 *    supportedMech [1] MechType OPTIONAL,
	 *    responseToken [2] OCTET STRING OPTIONAL,
	 *    mechListMIC   [3] OCTET STRING OPTIONAL
	 *
	 * size = data->length + MechListMic + SupportedMech len +
	 *	Result Length + ASN.1 overhead
	 */
	NegTokenTargSize = dataLen;
	dataLen += 1 + gssint_der_length_size(NegTokenTargSize);

	/*
	 * NegotiationToken [ CHOICE ]{
	 *    negTokenInit  [0]	 NegTokenInit,
	 *    negTokenTarg  [1]	 NegTokenTarg }
	 */
	NegTokenSize = dataLen;
	dataLen += 1 + gssint_der_length_size(NegTokenSize);

	tlen = dataLen;
	t = (unsigned char *) gssalloc_malloc(tlen);

	if (t == NULL) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto errout;
	}

	ptr = t;

	/*
	 * Pass 2: emit the token.  Remaining space for each write is
	 * tlen minus the bytes already written (ptr - t).
	 */
	/*
	 * Indicate that we are sending CHOICE 1
	 * (NegTokenTarg)
	 */
	*ptr++ = CONTEXT | 0x01;
	if (gssint_put_der_length(NegTokenSize, &ptr, dataLen) < 0) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto errout;
	}
	*ptr++ = SEQUENCE;
	if (gssint_put_der_length(NegTokenTargSize, &ptr,
				  tlen - (int)(ptr-t)) < 0) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto errout;
	}

	/*
	 * First field of the NegTokenTarg SEQUENCE
	 * is the ENUMERATED NegResult.
	 */
	*ptr++ = CONTEXT;
	if (gssint_put_der_length(3, &ptr,
				  tlen - (int)(ptr-t)) < 0) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto errout;
	}
	if (put_negResult(&ptr, status, tlen - (int)(ptr - t)) < 0) {
		ret = GSS_S_DEFECTIVE_TOKEN;
		goto errout;
	}

	if (sendtoken == INIT_TOKEN_SEND) {
		/*
		 * Next, is the Supported MechType
		 */
		*ptr++ = CONTEXT | 0x01;
		if (gssint_put_der_length(mech_wanted->length + 2,
					  &ptr,
					  tlen - (int)(ptr - t)) < 0) {
			ret = GSS_S_DEFECTIVE_TOKEN;
			goto errout;
		}
		if (put_mech_oid(&ptr, mech_wanted,
				 tlen - (int)(ptr - t)) < 0) {
			ret = GSS_S_DEFECTIVE_TOKEN;
			goto errout;
		}
	}
	/* Optional [2] responseToken. */
	if (data != NULL && data->length > 0) {
		*ptr++ = CONTEXT | 0x02;
		if (gssint_put_der_length(rspTokenSize, &ptr,
					  tlen - (int)(ptr - t)) < 0) {
			ret = GSS_S_DEFECTIVE_TOKEN;
			goto errout;
		}
		if (put_input_token(&ptr, data,
				    tlen - (int)(ptr - t)) < 0) {
			ret = GSS_S_DEFECTIVE_TOKEN;
			goto errout;
		}
	}
	/* Optional [3] mechListMIC. */
	if (mechListMIC != NULL) {
		*ptr++ = CONTEXT | 0x03;
		if (gssint_put_der_length(micTokenSize, &ptr,
					  tlen - (int)(ptr - t)) < 0) {
			ret = GSS_S_DEFECTIVE_TOKEN;
			goto errout;
		}
		if (put_input_token(&ptr, mechListMIC,
				    tlen - (int)(ptr - t)) < 0) {
			ret = GSS_S_DEFECTIVE_TOKEN;
			goto errout;
		}
	}
	ret = GSS_S_COMPLETE;
errout:
	/* On failure the token buffer is released; outbuf stays empty. */
	if (ret != GSS_S_COMPLETE) {
		if (t)
			free(t);
	} else {
		outbuf->length = ptr - t;
		outbuf->value = (void *) t;
	}

	return (ret);
}
/* determine size of token */
static int
g_token_size(gss_OID_const mech, unsigned int body_size)
{
	int oid_part;
	int framed;

	/*
	 * Mechanism OID element: 0x06 [DER LEN] [OID bytes].
	 */
	oid_part = 1 + gssint_der_length_size(mech->length) + mech->length;
	/*
	 * Outer framing: 0x60 [DER LEN], where the length covers the
	 * OID element plus the body.
	 */
	framed = oid_part + body_size;
	return (framed + 1 + gssint_der_length_size(framed));
}
/*
* generate token header.
*
* Use DER Definite Length method per RFC2478
* Use of indefinite length encoding will not be compatible
* with Microsoft or others that actually follow the spec.
*/
static int
g_make_token_header(gss_OID_const mech,
		    unsigned int body_size,
		    unsigned char **buf,
		    unsigned int totallen)
{
	int ret = 0;
	unsigned int hdrsize;
	unsigned char *p = *buf;	/* start of the output buffer */

	/* Mechanism OID element: 0x06 [DER LEN] [OID bytes]. */
	hdrsize = 1 + gssint_der_length_size(mech->length) + mech->length;
	*(*buf)++ = HEADER_ID;
	if ((ret = gssint_put_der_length(hdrsize + body_size, buf, totallen)))
		return (ret);

	*(*buf)++ = MECH_OID;
	/*
	 * Remaining space is the total minus what the cursor has already
	 * consumed (*buf - p).  The previous code computed (p - *buf), a
	 * negative value whose conversion over-reported the remaining
	 * room and defeated the callee's bounds check.
	 */
	if ((ret = gssint_put_der_length(mech->length, buf,
					 totallen - (int)(*buf - p))))
		return (ret);
	TWRITE_STR(*buf, mech->elements, mech->length);
	return (0);
}
/*
* NOTE: This checks that the length returned by
* gssint_get_der_length() is not greater than the number of octets
* remaining, even though gssint_get_der_length() already checks, in
* theory.
*/
static int
g_get_tag_and_length(unsigned char **buf, int tag,
		     unsigned int buflen, unsigned int *outlen)
{
	unsigned char *ptr = *buf;
	int ret = -1; /* pessimists, assume failure ! */
	unsigned int encoded_len;
	int tmplen = 0;

	*outlen = 0;
	/* Need at least the tag octet plus one length octet, and the
	  tag must match what the caller expects. */
	if (buflen > 1 && *ptr == tag) {
		ptr++;
		tmplen = gssint_get_der_length(&ptr, buflen - 1,
					       &encoded_len);
		if (tmplen < 0) {
			ret = -1;
		} else if ((unsigned int)tmplen > buflen - (ptr - *buf)) {
			/* Length field claims more bytes than remain. */
			ret = -1;
		} else
			ret = 0;
	}
	/* On failure *outlen may hold a partially decoded length and
	  *buf may have advanced past the length octets; callers must
	  check the return value before trusting either. */
	*outlen = tmplen;
	*buf = ptr;

	return (ret);
}
/*
 * Validate the NegotiationToken wrapper of a NegTokenInit and advance
 * *buf_in to the start of the DER-encoded mechanism list.  Returns 0
 * on success, G_BAD_TOK_HEADER on any structural error.
 */
static int
g_verify_neg_token_init(unsigned char **buf_in, unsigned int cur_size)
{
	unsigned char *buf = *buf_in;
	unsigned char *endptr = buf + cur_size;
	int seqsize;
	int ret = 0;
	unsigned int bytes;

	/*
	 * Verify this is a NegotiationToken type token
	 * - check for a0(context specific identifier)
	 * - get length and verify that enoughd ata exists
	 */
	if (g_get_tag_and_length(&buf, CONTEXT, cur_size, &bytes) < 0)
		return (G_BAD_TOK_HEADER);

	cur_size = bytes; /* should indicate bytes remaining */

	/*
	 * Verify the next piece, it should identify this as
	 * a strucure of type NegTokenInit.
	 */
	if (*buf++ == SEQUENCE) {
		if ((seqsize = gssint_get_der_length(&buf, cur_size, &bytes)) < 0)
			return (G_BAD_TOK_HEADER);
		/*
		 * Make sure we have the entire buffer as described
		 */
		if (seqsize > endptr - buf)
			return (G_BAD_TOK_HEADER);
	} else {
		return (G_BAD_TOK_HEADER);
	}

	cur_size = seqsize; /* should indicate bytes remaining */

	/*
	 * Verify that the first blob is a sequence of mechTypes
	 */
	if (*buf++ == CONTEXT) {
		if ((seqsize = gssint_get_der_length(&buf, cur_size, &bytes)) < 0)
			return (G_BAD_TOK_HEADER);
		/*
		 * Make sure we have the entire buffer as described
		 */
		if (seqsize > endptr - buf)
			return (G_BAD_TOK_HEADER);
	} else {
		return (G_BAD_TOK_HEADER);
	}

	/*
	 * At this point, *buf should be at the beginning of the
	 * DER encoded list of mech types that are to be negotiated.
	 */
	*buf_in = buf;

	return (ret);
}
/* verify token header. */
/*
 * Verify the outer GSS framing (0x60, DER length, mechanism OID) of a
 * token.  On success, *buf_in is advanced past the header and
 * *body_size receives the remaining byte count.  Returns 0 on success,
 * G_BAD_TOK_HEADER on structural errors, or G_WRONG_MECH when the
 * header parses but names a different mechanism.
 */
static int
g_verify_token_header(gss_OID_const mech,
		      unsigned int *body_size,
		      unsigned char **buf_in,
		      int tok_type,
		      unsigned int toksize)
{
	unsigned char *buf = *buf_in;
	int seqsize;
	gss_OID_desc toid;
	int ret = 0;
	unsigned int bytes;

	if (toksize-- < 1)
		return (G_BAD_TOK_HEADER);

	if (*buf++ != HEADER_ID)
		return (G_BAD_TOK_HEADER);

	if ((seqsize = gssint_get_der_length(&buf, toksize, &bytes)) < 0)
		return (G_BAD_TOK_HEADER);

	/* The DER length plus its own encoding must account for every
	  remaining byte -- no trailing garbage is tolerated. */
	if ((seqsize + bytes) != toksize)
		return (G_BAD_TOK_HEADER);

	if (toksize-- < 1)
		return (G_BAD_TOK_HEADER);

	if (*buf++ != MECH_OID)
		return (G_BAD_TOK_HEADER);

	if (toksize-- < 1)
		return (G_BAD_TOK_HEADER);

	toid.length = *buf++;

	if (toksize < toid.length)
		return (G_BAD_TOK_HEADER);
	else
		toksize -= toid.length;

	toid.elements = buf;
	buf += toid.length;

	if (!g_OID_equal(&toid, mech))
		ret = G_WRONG_MECH;

	/*
	 * G_WRONG_MECH is not returned immediately because it's more important
	 * to return G_BAD_TOK_HEADER if the token header is in fact bad
	 */
	/* Account for the two token-type octets that follow the OID. */
	if (toksize < 2)
		return (G_BAD_TOK_HEADER);
	else
		toksize -= 2;

	/* Only commit the outputs when the mechanism also matched. */
	if (!ret) {
		*buf_in = buf;
		*body_size = toksize;
	}

	return (ret);
}
/*
* Return non-zero if the oid is one of the kerberos mech oids,
* otherwise return zero.
*
* N.B. There are 3 oids that represent the kerberos mech:
* RFC-specified GSS_MECH_KRB5_OID,
* Old pre-RFC GSS_MECH_KRB5_OLD_OID,
* Incorrect MS GSS_MECH_KRB5_WRONG_OID
*/
static int
is_kerb_mech(gss_OID oid)
{
	extern const gss_OID_set_desc * const gss_mech_set_krb5_both;
	OM_uint32 minor;
	int present = 0;

	/* Membership test against the set of all krb5 mech OID aliases;
	  the status code is intentionally ignored (present stays 0). */
	(void) gss_test_oid_set_member(&minor, oid,
	    (gss_OID_set)gss_mech_set_krb5_both, &present);
	return (present);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_2200_0 |
crossvul-cpp_data_bad_5371_1 | /*
* Copyright (c) 1999-2000 Image Power, Inc. and the University of
* British Columbia.
* Copyright (c) 2001-2003 Michael David Adams.
* All rights reserved.
*/
/* __START_OF_JASPER_LICENSE__
*
* JasPer License Version 2.0
*
* Copyright (c) 2001-2006 Michael David Adams
* Copyright (c) 1999-2000 Image Power, Inc.
* Copyright (c) 1999-2000 The University of British Columbia
*
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person (the
* "User") obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the
* following conditions:
*
* 1. The above copyright notices and this permission notice (which
* includes the disclaimer below) shall be included in all copies or
* substantial portions of the Software.
*
* 2. The name of a copyright holder shall not be used to endorse or
* promote products derived from the Software without specific prior
* written permission.
*
* THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS
* LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER
* THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
* "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
* INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
* FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
* WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE
* PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE
* THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY.
* EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS
* BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL
* PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS
* GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE
* ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE
* IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL
* SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES,
* AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL
* SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH
* THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH,
* PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH
* RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY
* EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES.
*
* __END_OF_JASPER_LICENSE__
*/
/*
* JP2 Library
*
* $Id$
*/
/******************************************************************************\
* Includes.
\******************************************************************************/
#include "jasper/jas_image.h"
#include "jasper/jas_stream.h"
#include "jasper/jas_math.h"
#include "jasper/jas_debug.h"
#include "jasper/jas_malloc.h"
#include "jasper/jas_version.h"
#include "jp2_cod.h"
#include "jp2_dec.h"
#define JP2_VALIDATELEN (JAS_MIN(JP2_JP_LEN + 16, JAS_STREAM_MAXPUTBACK))
static jp2_dec_t *jp2_dec_create(void);
static void jp2_dec_destroy(jp2_dec_t *dec);
static int jp2_getcs(jp2_colr_t *colr);
static int fromiccpcs(int cs);
static int jp2_getct(int colorspace, int type, int assoc);
/******************************************************************************\
* Functions.
\******************************************************************************/
/*
 * Decode a JP2 file: read the metadata boxes, decode the embedded
 * JPEG-2000 code stream, apply palette/channel mappings, and assign
 * component types.  Returns a new image on success, 0 on failure.
 */
jas_image_t *jp2_decode(jas_stream_t *in, char *optstr)
{
	jp2_box_t *box;
	int found;
	jas_image_t *image;
	jp2_dec_t *dec;
	bool samedtype;
	int dtype;
	unsigned int i;
	jp2_cmap_t *cmapd;
	jp2_pclr_t *pclrd;
	jp2_cdef_t *cdefd;
	unsigned int channo;
	int newcmptno;
	int_fast32_t *lutents;
#if 0
	jp2_cdefchan_t *cdefent;
	int cmptno;
#endif
	jp2_cmapent_t *cmapent;
	jas_icchdr_t icchdr;
	jas_iccprof_t *iccprof;

	dec = 0;
	box = 0;
	image = 0;

	if (!(dec = jp2_dec_create())) {
		goto error;
	}

	/* Get the first box. This should be a JP box. */
	if (!(box = jp2_box_get(in))) {
		jas_eprintf("error: cannot get box\n");
		goto error;
	}
	if (box->type != JP2_BOX_JP) {
		jas_eprintf("error: expecting signature box\n");
		goto error;
	}
	if (box->data.jp.magic != JP2_JP_MAGIC) {
		jas_eprintf("incorrect magic number\n");
		goto error;
	}
	jp2_box_destroy(box);
	box = 0;

	/* Get the second box. This should be a FTYP box. */
	if (!(box = jp2_box_get(in))) {
		goto error;
	}
	if (box->type != JP2_BOX_FTYP) {
		jas_eprintf("expecting file type box\n");
		goto error;
	}
	jp2_box_destroy(box);
	box = 0;

	/* Get more boxes, keeping the first instance of each metadata
	  box of interest, until the code stream (JP2C) is found. */
	found = 0;
	while ((box = jp2_box_get(in))) {
		if (jas_getdbglevel() >= 1) {
			jas_eprintf("box type %s\n", box->info->name);
		}
		switch (box->type) {
		case JP2_BOX_JP2C:
			found = 1;
			break;
		case JP2_BOX_IHDR:
			if (!dec->ihdr) {
				dec->ihdr = box;
				box = 0;
			}
			break;
		case JP2_BOX_BPCC:
			if (!dec->bpcc) {
				dec->bpcc = box;
				box = 0;
			}
			break;
		case JP2_BOX_CDEF:
			if (!dec->cdef) {
				dec->cdef = box;
				box = 0;
			}
			break;
		case JP2_BOX_PCLR:
			if (!dec->pclr) {
				dec->pclr = box;
				box = 0;
			}
			break;
		case JP2_BOX_CMAP:
			if (!dec->cmap) {
				dec->cmap = box;
				box = 0;
			}
			break;
		case JP2_BOX_COLR:
			if (!dec->colr) {
				dec->colr = box;
				box = 0;
			}
			break;
		}
		if (box) {
			jp2_box_destroy(box);
			box = 0;
		}
		if (found) {
			break;
		}
	}

	if (!found) {
		jas_eprintf("error: no code stream found\n");
		goto error;
	}

	if (!(dec->image = jpc_decode(in, optstr))) {
		jas_eprintf("error: cannot decode code stream\n");
		goto error;
	}

	/* An IHDR box must be present. */
	if (!dec->ihdr) {
		jas_eprintf("error: missing IHDR box\n");
		goto error;
	}

	/* Does the number of components indicated in the IHDR box match
	  the value specified in the code stream? */
	if (dec->ihdr->data.ihdr.numcmpts != JAS_CAST(uint,
	  jas_image_numcmpts(dec->image))) {
		jas_eprintf("warning: number of components mismatch\n");
	}

	/* At least one component must be present. */
	if (!jas_image_numcmpts(dec->image)) {
		jas_eprintf("error: no components\n");
		goto error;
	}

	/* Determine if all components have the same data type. */
	samedtype = true;
	dtype = jas_image_cmptdtype(dec->image, 0);
	for (i = 1; i < JAS_CAST(uint, jas_image_numcmpts(dec->image)); ++i) {
		if (jas_image_cmptdtype(dec->image, i) != dtype) {
			samedtype = false;
			break;
		}
	}

	/* Is the component data type indicated in the IHDR box consistent
	  with the data in the code stream? */
	if ((samedtype && dec->ihdr->data.ihdr.bpc != JP2_DTYPETOBPC(dtype)) ||
	  (!samedtype && dec->ihdr->data.ihdr.bpc != JP2_IHDR_BPCNULL)) {
		jas_eprintf("warning: component data type mismatch\n");
	}

	/* Is the compression type supported? */
	if (dec->ihdr->data.ihdr.comptype != JP2_IHDR_COMPTYPE) {
		jas_eprintf("error: unsupported compression type\n");
		goto error;
	}

	if (dec->bpcc) {
		/* Is the number of components indicated in the BPCC box
		  consistent with the code stream data? */
		if (dec->bpcc->data.bpcc.numcmpts != JAS_CAST(uint, jas_image_numcmpts(
		  dec->image))) {
			jas_eprintf("warning: number of components mismatch\n");
		}
		/* Is the component data type information indicated in the BPCC
		  box consistent with the code stream data? */
		if (!samedtype) {
			for (i = 0; i < JAS_CAST(uint, jas_image_numcmpts(dec->image));
			  ++i) {
				if (jas_image_cmptdtype(dec->image, i) !=
				  JP2_BPCTODTYPE(dec->bpcc->data.bpcc.bpcs[i])) {
					jas_eprintf("warning: component data type mismatch\n");
				}
			}
		} else {
			jas_eprintf("warning: superfluous BPCC box\n");
		}
	}

	/* A COLR box must be present. */
	if (!dec->colr) {
		jas_eprintf("error: no COLR box\n");
		goto error;
	}

	/* NOTE(review): an unrecognized colr method leaves the color
	  space unset; behavior preserved from the original code. */
	switch (dec->colr->data.colr.method) {
	case JP2_COLR_ENUM:
		jas_image_setclrspc(dec->image, jp2_getcs(&dec->colr->data.colr));
		break;
	case JP2_COLR_ICC:
		iccprof = jas_iccprof_createfrombuf(dec->colr->data.colr.iccp,
		  dec->colr->data.colr.iccplen);
		if (!iccprof) {
			jas_eprintf("error: failed to parse ICC profile\n");
			goto error;
		}
		jas_iccprof_gethdr(iccprof, &icchdr);
		jas_eprintf("ICC Profile CS %08x\n", icchdr.colorspc);
		jas_image_setclrspc(dec->image, fromiccpcs(icchdr.colorspc));
		dec->image->cmprof_ = jas_cmprof_createfromiccprof(iccprof);
		/* This was only assert()ed before; in a release build a
		  failed conversion would leave a null profile behind and
		  crash later users of the image. */
		if (!dec->image->cmprof_) {
			jas_eprintf("error: cannot create CM profile\n");
			jas_iccprof_destroy(iccprof);
			goto error;
		}
		jas_iccprof_destroy(iccprof);
		break;
	}

	/* If a CMAP box is present, a PCLR box must also be present. */
	if (dec->cmap && !dec->pclr) {
		jas_eprintf("warning: missing PCLR box or superfluous CMAP box\n");
		jp2_box_destroy(dec->cmap);
		dec->cmap = 0;
	}

	/* If a CMAP box is not present, a PCLR box must not be present. */
	if (!dec->cmap && dec->pclr) {
		jas_eprintf("warning: missing CMAP box or superfluous PCLR box\n");
		jp2_box_destroy(dec->pclr);
		dec->pclr = 0;
	}

	/* Determine the number of channels (which is essentially the number
	  of components after any palette mappings have been applied). */
	dec->numchans = dec->cmap ? dec->cmap->data.cmap.numchans :
	  JAS_CAST(uint, jas_image_numcmpts(dec->image));

	/* Perform a basic sanity check on the CMAP box if present.
	  (After the fixups above, a CMAP box implies a PCLR box.) */
	if (dec->cmap) {
		for (i = 0; i < dec->numchans; ++i) {
			/* Is the component number reasonable? */
			if (dec->cmap->data.cmap.ents[i].cmptno >= JAS_CAST(uint,
			  jas_image_numcmpts(dec->image))) {
				jas_eprintf("error: invalid component number in CMAP box\n");
				goto error;
			}
			/* Is the LUT index reasonable? */
			if (dec->cmap->data.cmap.ents[i].pcol >=
			  dec->pclr->data.pclr.numchans) {
				jas_eprintf("error: invalid CMAP LUT index\n");
				goto error;
			}
		}
	}

	/* Allocate space for the channel-number to component-number LUT. */
	if (!(dec->chantocmptlut = jas_alloc2(dec->numchans,
	  sizeof(uint_fast16_t)))) {
		jas_eprintf("error: no memory\n");
		goto error;
	}

	if (!dec->cmap) {
		/* No palette mapping: channels map 1:1 to components. */
		for (i = 0; i < dec->numchans; ++i) {
			dec->chantocmptlut[i] = i;
		}
	} else {
		cmapd = &dec->cmap->data.cmap;
		pclrd = &dec->pclr->data.pclr;
		/* The CDEF box is optional; do not touch it if absent. */
		cdefd = dec->cdef ? &dec->cdef->data.cdef : 0;
		for (channo = 0; channo < cmapd->numchans; ++channo) {
			cmapent = &cmapd->ents[channo];
			if (cmapent->map == JP2_CMAP_DIRECT) {
				dec->chantocmptlut[channo] = channo;
			} else if (cmapent->map == JP2_CMAP_PALETTE) {
				/* Previously unchecked: a failed allocation
				  here caused a null-pointer dereference. */
				if (!(lutents = jas_alloc2(pclrd->numlutents,
				  sizeof(int_fast32_t)))) {
					jas_eprintf("error: no memory\n");
					goto error;
				}
				for (i = 0; i < pclrd->numlutents; ++i) {
					lutents[i] = pclrd->lutdata[cmapent->pcol + i * pclrd->numchans];
				}
				newcmptno = jas_image_numcmpts(dec->image);
				/* Check the (previously ignored) result so a
				  failed depalettize cannot go unnoticed. */
				if (jas_image_depalettize(dec->image,
				  cmapent->cmptno, pclrd->numlutents, lutents,
				  JP2_BPCTODTYPE(pclrd->bpc[cmapent->pcol]),
				  newcmptno)) {
					jas_free(lutents);
					goto error;
				}
				dec->chantocmptlut[channo] = newcmptno;
				jas_free(lutents);
#if 0
				if (dec->cdef) {
					cdefent = jp2_cdef_lookup(cdefd, channo);
					if (!cdefent) {
						abort();
					}
				jas_image_setcmpttype(dec->image, newcmptno, jp2_getct(jas_image_clrspc(dec->image), cdefent->type, cdefent->assoc));
				} else {
				jas_image_setcmpttype(dec->image, newcmptno, jp2_getct(jas_image_clrspc(dec->image), 0, channo + 1));
				}
#endif
			}
		}
	}

	/* Mark all components as being of unknown type. */
	for (i = 0; i < JAS_CAST(uint, jas_image_numcmpts(dec->image)); ++i) {
		jas_image_setcmpttype(dec->image, i, JAS_IMAGE_CT_UNKNOWN);
	}

	/* Determine the type of each component. */
	if (dec->cdef) {
		/* NOTE(review): this indexes cdef ents[] by channel count;
		  a CDEF box with fewer entries than channels would be
		  over-read -- confirm against the box parser's guarantees. */
		for (i = 0; i < dec->numchans; ++i) {
			/* Is the channel number reasonable? */
			if (dec->cdef->data.cdef.ents[i].channo >= dec->numchans) {
				jas_eprintf("error: invalid channel number in CDEF box\n");
				goto error;
			}
			jas_image_setcmpttype(dec->image,
			  dec->chantocmptlut[dec->cdef->data.cdef.ents[i].channo],
			  jp2_getct(jas_image_clrspc(dec->image),
			  dec->cdef->data.cdef.ents[i].type,
			  dec->cdef->data.cdef.ents[i].assoc));
		}
	} else {
		for (i = 0; i < dec->numchans; ++i) {
			jas_image_setcmpttype(dec->image, dec->chantocmptlut[i],
			  jp2_getct(jas_image_clrspc(dec->image), 0, i + 1));
		}
	}

	/* Delete any components that are not of interest. */
	for (i = jas_image_numcmpts(dec->image); i > 0; --i) {
		if (jas_image_cmpttype(dec->image, i - 1) == JAS_IMAGE_CT_UNKNOWN) {
			jas_image_delcmpt(dec->image, i - 1);
		}
	}

	/* Ensure that some components survived. */
	if (!jas_image_numcmpts(dec->image)) {
		jas_eprintf("error: no components\n");
		goto error;
	}
#if 0
	jas_eprintf("no of components is %d\n", jas_image_numcmpts(dec->image));
#endif

	/* Prevent the image from being destroyed later. */
	image = dec->image;
	dec->image = 0;

	jp2_dec_destroy(dec);

	return image;

error:
	if (box) {
		jp2_box_destroy(box);
	}
	if (dec) {
		jp2_dec_destroy(dec);
	}
	return 0;
}
/*
 * Sniff a stream for the JP2 signature box without disturbing the
 * stream position.  Returns 0 if the stream looks like JP2, -1 if not.
 */
int jp2_validate(jas_stream_t *in)
{
	char buf[JP2_VALIDATELEN];
	int i;
	int n;

	assert(JAS_STREAM_MAXPUTBACK >= JP2_VALIDATELEN);

	/* Read the validation data (i.e., the data used for detecting
	  the format). */
	if ((n = jas_stream_read(in, buf, JP2_VALIDATELEN)) < 0) {
		return -1;
	}

	/* Put the validation data back onto the stream, so that the
	  stream position will not be changed. */
	for (i = n - 1; i >= 0; --i) {
		if (jas_stream_ungetc(in, buf[i]) == EOF) {
			return -1;
		}
	}

	/* Did we read enough data? */
	if (n < JP2_VALIDATELEN) {
		return -1;
	}

	/*
	 * Assemble the box type from bytes 4..7, treating each byte as
	 * unsigned.  With plain (possibly signed) char, a byte >= 0x80
	 * would sign-extend before the shift (and a left shift of such a
	 * value by 24 is undefined behavior), corrupting the comparison.
	 */
	if (((((uint_least32_t)(unsigned char)buf[4]) << 24) |
	  (((uint_least32_t)(unsigned char)buf[5]) << 16) |
	  (((uint_least32_t)(unsigned char)buf[6]) << 8) |
	  ((uint_least32_t)(unsigned char)buf[7])) != JP2_BOX_JP) {
		return -1;
	}

	return 0;
}
/* Allocate a decoder state object with all box pointers cleared.
 * Returns null on allocation failure. */
static jp2_dec_t *jp2_dec_create(void)
{
	jp2_dec_t *dec = jas_malloc(sizeof(jp2_dec_t));

	if (dec) {
		dec->ihdr = 0;
		dec->bpcc = 0;
		dec->cdef = 0;
		dec->pclr = 0;
		dec->image = 0;
		dec->chantocmptlut = 0;
		dec->cmap = 0;
		dec->colr = 0;
	}

	return dec;
}
/* Destroy a decoder state object, releasing every box and image it
 * still owns.  A null argument is tolerated as a no-op. */
static void jp2_dec_destroy(jp2_dec_t *dec)
{
	if (!dec) {
		return;
	}
	if (dec->ihdr) {
		jp2_box_destroy(dec->ihdr);
	}
	if (dec->bpcc) {
		jp2_box_destroy(dec->bpcc);
	}
	if (dec->cdef) {
		jp2_box_destroy(dec->cdef);
	}
	if (dec->pclr) {
		jp2_box_destroy(dec->pclr);
	}
	if (dec->image) {
		jas_image_destroy(dec->image);
	}
	if (dec->cmap) {
		jp2_box_destroy(dec->cmap);
	}
	if (dec->colr) {
		jp2_box_destroy(dec->colr);
	}
	if (dec->chantocmptlut) {
		jas_free(dec->chantocmptlut);
	}
	jas_free(dec);
}
/* Map a CDEF channel (type, association) pair, interpreted in the
 * given colour-space family, to a JasPer component type.  Returns
 * JAS_IMAGE_CT_UNKNOWN for combinations that do not correspond to a
 * recognized channel. */
static int jp2_getct(int colorspace, int type, int assoc)
{
	/* An opacity channel not tied to a particular colour. */
	if (type == 1 && assoc == 0)
		return JAS_IMAGE_CT_OPACITY;

	/* A colour channel with an in-range association index. */
	if (type == 0 && assoc >= 1 && assoc <= 65534) {
		if (colorspace == JAS_CLRSPC_FAM_RGB) {
			if (assoc == JP2_CDEF_RGB_R)
				return JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_R);
			if (assoc == JP2_CDEF_RGB_G)
				return JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_G);
			if (assoc == JP2_CDEF_RGB_B)
				return JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_RGB_B);
		} else if (colorspace == JAS_CLRSPC_FAM_YCBCR) {
			if (assoc == JP2_CDEF_YCBCR_Y)
				return JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_YCBCR_Y);
			if (assoc == JP2_CDEF_YCBCR_CB)
				return JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_YCBCR_CB);
			if (assoc == JP2_CDEF_YCBCR_CR)
				return JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_YCBCR_CR);
		} else if (colorspace == JAS_CLRSPC_FAM_GRAY) {
			if (assoc == JP2_CDEF_GRAY_Y)
				return JAS_IMAGE_CT_COLOR(JAS_CLRSPC_CHANIND_GRAY_Y);
		} else {
			/* Unrecognized family: association index maps directly
			   to a zero-based channel index. */
			return JAS_IMAGE_CT_COLOR(assoc - 1);
		}
	}

	return JAS_IMAGE_CT_UNKNOWN;
}
/* Translate an enumerated colour-specification box value to a JasPer
 * colour space; anything not enumerated maps to JAS_CLRSPC_UNKNOWN. */
static int jp2_getcs(jp2_colr_t *colr)
{
	if (colr->method == JP2_COLR_ENUM) {
		if (colr->csid == JP2_COLR_SRGB)
			return JAS_CLRSPC_SRGB;
		if (colr->csid == JP2_COLR_SYCC)
			return JAS_CLRSPC_SYCBCR;
		if (colr->csid == JP2_COLR_SGRAY)
			return JAS_CLRSPC_SGRAY;
	}
	return JAS_CLRSPC_UNKNOWN;
}
/* Convert an ICC profile colour-space code to the corresponding
 * generic JasPer colour space, or JAS_CLRSPC_UNKNOWN. */
static int fromiccpcs(int cs)
{
	if (cs == ICC_CS_RGB)
		return JAS_CLRSPC_GENRGB;
	if (cs == ICC_CS_YCBCR)
		return JAS_CLRSPC_GENYCBCR;
	if (cs == ICC_CS_GRAY)
		return JAS_CLRSPC_GENGRAY;
	return JAS_CLRSPC_UNKNOWN;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_5371_1 |
crossvul-cpp_data_good_5715_3 | /**
* FreeRDP: A Remote Desktop Protocol Implementation
* Security Support Provider Interface (SSPI)
*
* Copyright 2012 Marc-Andre Moreau <marcandre.moreau@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdlib.h>
#include <winpr/windows.h>
#include <winpr/crt.h>
#include <winpr/sspi.h>
#include <winpr/print.h>
#include <openssl/ssl.h>
#include <openssl/err.h>
#include "sspi.h"
/* Authentication Functions: http://msdn.microsoft.com/en-us/library/windows/desktop/aa374731/ */
#ifdef WINPR_SSPI
extern const SecPkgInfoA NTLM_SecPkgInfoA;
extern const SecPkgInfoW NTLM_SecPkgInfoW;
extern const SecurityFunctionTableA NTLM_SecurityFunctionTableA;
extern const SecurityFunctionTableW NTLM_SecurityFunctionTableW;
extern const SecPkgInfoA CREDSSP_SecPkgInfoA;
extern const SecPkgInfoW CREDSSP_SecPkgInfoW;
extern const SecurityFunctionTableA CREDSSP_SecurityFunctionTableA;
extern const SecurityFunctionTableW CREDSSP_SecurityFunctionTableW;
extern const SecPkgInfoA SCHANNEL_SecPkgInfoA;
extern const SecPkgInfoW SCHANNEL_SecPkgInfoW;
extern const SecurityFunctionTableA SCHANNEL_SecurityFunctionTableA;
extern const SecurityFunctionTableW SCHANNEL_SecurityFunctionTableW;
const SecPkgInfoA* SecPkgInfoA_LIST[] =
{
&NTLM_SecPkgInfoA,
&CREDSSP_SecPkgInfoA,
&SCHANNEL_SecPkgInfoA
};
const SecPkgInfoW* SecPkgInfoW_LIST[] =
{
&NTLM_SecPkgInfoW,
&CREDSSP_SecPkgInfoW,
&SCHANNEL_SecPkgInfoW
};
SecurityFunctionTableA SSPI_SecurityFunctionTableA;
SecurityFunctionTableW SSPI_SecurityFunctionTableW;
struct _SecurityFunctionTableA_NAME
{
SEC_CHAR* Name;
const SecurityFunctionTableA* SecurityFunctionTable;
};
typedef struct _SecurityFunctionTableA_NAME SecurityFunctionTableA_NAME;
struct _SecurityFunctionTableW_NAME
{
SEC_WCHAR* Name;
const SecurityFunctionTableW* SecurityFunctionTable;
};
typedef struct _SecurityFunctionTableW_NAME SecurityFunctionTableW_NAME;
const SecurityFunctionTableA_NAME SecurityFunctionTableA_NAME_LIST[] =
{
{ "NTLM", &NTLM_SecurityFunctionTableA },
{ "CREDSSP", &CREDSSP_SecurityFunctionTableA },
{ "Schannel", &SCHANNEL_SecurityFunctionTableA }
};
WCHAR NTLM_NAME_W[] = { 'N','T','L','M','\0' };
WCHAR CREDSSP_NAME_W[] = { 'C','r','e','d','S','S','P','\0' };
WCHAR SCHANNEL_NAME_W[] = { 'S','c','h','a','n','n','e','l','\0' };
const SecurityFunctionTableW_NAME SecurityFunctionTableW_NAME_LIST[] =
{
{ NTLM_NAME_W, &NTLM_SecurityFunctionTableW },
{ CREDSSP_NAME_W, &CREDSSP_SecurityFunctionTableW },
{ SCHANNEL_NAME_W, &SCHANNEL_SecurityFunctionTableW }
};
#endif
#define SecHandle_LOWER_MAX 0xFFFFFFFF
#define SecHandle_UPPER_MAX 0xFFFFFFFE
struct _CONTEXT_BUFFER_ALLOC_ENTRY
{
void* contextBuffer;
UINT32 allocatorIndex;
};
typedef struct _CONTEXT_BUFFER_ALLOC_ENTRY CONTEXT_BUFFER_ALLOC_ENTRY;
struct _CONTEXT_BUFFER_ALLOC_TABLE
{
UINT32 cEntries;
UINT32 cMaxEntries;
CONTEXT_BUFFER_ALLOC_ENTRY* entries;
};
typedef struct _CONTEXT_BUFFER_ALLOC_TABLE CONTEXT_BUFFER_ALLOC_TABLE;
CONTEXT_BUFFER_ALLOC_TABLE ContextBufferAllocTable;
/* Initialize the global context-buffer allocation table with a small
 * initial capacity.  On allocation failure the table is left with
 * zero capacity so later lookups simply find no free slot, instead of
 * dereferencing a NULL entries pointer. */
void sspi_ContextBufferAllocTableNew()
{
	ContextBufferAllocTable.cEntries = 0;
	ContextBufferAllocTable.cMaxEntries = 4;

	/* calloc zero-initializes the slots and checks the count*size
	   product for overflow */
	ContextBufferAllocTable.entries = calloc(ContextBufferAllocTable.cMaxEntries,
			sizeof(CONTEXT_BUFFER_ALLOC_ENTRY));

	if (!ContextBufferAllocTable.entries)
		ContextBufferAllocTable.cMaxEntries = 0;
}
void sspi_ContextBufferAllocTableGrow()
{
size_t size;
ContextBufferAllocTable.cEntries = 0;
ContextBufferAllocTable.cMaxEntries *= 2;
size = sizeof(CONTEXT_BUFFER_ALLOC_ENTRY) * ContextBufferAllocTable.cMaxEntries;
ContextBufferAllocTable.entries = realloc(ContextBufferAllocTable.entries, size);
ZeroMemory((void*) &ContextBufferAllocTable.entries[ContextBufferAllocTable.cMaxEntries / 2], size / 2);
}
/* Release the context-buffer allocation table and reset it to an
 * empty state so a later re-initialization starts from scratch. */
void sspi_ContextBufferAllocTableFree()
{
	ContextBufferAllocTable.cEntries = ContextBufferAllocTable.cMaxEntries = 0;

	free(ContextBufferAllocTable.entries);
	ContextBufferAllocTable.entries = NULL; /* avoid a dangling pointer */
}
/* Allocate a zeroed context buffer of the given size and record it in
 * the allocation table, tagged with the allocator that owns it.
 * Returns NULL on out-of-memory. */
void* sspi_ContextBufferAlloc(UINT32 allocatorIndex, size_t size)
{
	int index;
	void* contextBuffer;
	UINT32 cMaxEntries;

	for (index = 0; index < (int) ContextBufferAllocTable.cMaxEntries; index++)
	{
		if (ContextBufferAllocTable.entries[index].contextBuffer == NULL)
		{
			contextBuffer = calloc(1, size);

			if (!contextBuffer)
				return NULL;

			ContextBufferAllocTable.cEntries++;

			ContextBufferAllocTable.entries[index].contextBuffer = contextBuffer;
			ContextBufferAllocTable.entries[index].allocatorIndex = allocatorIndex;

			return ContextBufferAllocTable.entries[index].contextBuffer;
		}
	}

	/* no available entry was found, the table needs to be grown */
	cMaxEntries = ContextBufferAllocTable.cMaxEntries;
	sspi_ContextBufferAllocTableGrow();

	/* if growing did not increase the capacity (out of memory), give
	   up instead of recursing forever */
	if (ContextBufferAllocTable.cMaxEntries <= cMaxEntries)
		return NULL;

	/* the next call to sspi_ContextBufferAlloc() should now succeed */
	return sspi_ContextBufferAlloc(allocatorIndex, size);
}
/* Allocate a zero-initialized CREDENTIALS structure.
 * Returns NULL on allocation failure.  (The allocation is checked
 * BEFORE the structure is touched, unlike the former zero-then-check
 * order which dereferenced NULL on out-of-memory.) */
CREDENTIALS* sspi_CredentialsNew()
{
	CREDENTIALS* credentials;

	credentials = (CREDENTIALS*) calloc(1, sizeof(CREDENTIALS));

	if (!credentials)
		return NULL;

	return credentials;
}
/* Release a CREDENTIALS structure; a NULL argument is a no-op. */
void sspi_CredentialsFree(CREDENTIALS* credentials)
{
	/* free(NULL) is well-defined, so no explicit guard is needed */
	free(credentials);
}
/* Allocate a zeroed backing buffer of `size` bytes for a SecBuffer.
 * On allocation failure the buffer is left empty (pvBuffer NULL,
 * cbBuffer 0) so callers can detect the failure. */
void sspi_SecBufferAlloc(PSecBuffer SecBuffer, size_t size)
{
	SecBuffer->pvBuffer = calloc(1, size);

	if (!SecBuffer->pvBuffer)
	{
		SecBuffer->cbBuffer = 0;
		return;
	}

	SecBuffer->cbBuffer = size;
}
/* Release a SecBuffer's backing storage and reset it to empty. */
void sspi_SecBufferFree(PSecBuffer SecBuffer)
{
	free(SecBuffer->pvBuffer);

	SecBuffer->cbBuffer = 0;
	SecBuffer->pvBuffer = NULL;
}
/* Allocate a SecHandle pre-set to the invalid (all-ones) pattern.
 * Returns NULL on allocation failure. */
SecHandle* sspi_SecureHandleAlloc()
{
	SecHandle* handle;

	handle = (SecHandle*) malloc(sizeof(SecHandle));
	sspi_SecureHandleInit(handle); /* tolerates a NULL handle */

	return handle;
}
/* Fill a SecHandle with the all-ones byte pattern, which this module
 * treats as the "invalid handle" value.  NULL is a no-op. */
void sspi_SecureHandleInit(SecHandle* handle)
{
	if (handle)
		memset(handle, 0xFF, sizeof(SecHandle));
}
/* Mark a SecHandle invalid by resetting it to the all-ones pattern.
 * NULL is tolerated (sspi_SecureHandleInit ignores it). */
void sspi_SecureHandleInvalidate(SecHandle* handle)
{
	sspi_SecureHandleInit(handle);
}
/* Recover the pointer stored in the lower half of a SecHandle.
 * Pointers are kept bitwise-inverted (see the matching setter), so an
 * all-ones "invalid" handle never aliases a real pointer.
 * Returns NULL for a NULL or invalid handle. */
void* sspi_SecureHandleGetLowerPointer(SecHandle* handle)
{
	if (!handle || !SecIsValidHandle(handle))
		return NULL;

	return (void*) ~((size_t) handle->dwLower);
}
/* Store a pointer (bitwise-inverted) in the lower half of a
 * SecHandle.  NULL handle is a no-op. */
void sspi_SecureHandleSetLowerPointer(SecHandle* handle, void* pointer)
{
	if (handle)
		handle->dwLower = (ULONG_PTR) (~((size_t) pointer));
}
/* Recover the pointer stored (bitwise-inverted) in the upper half of
 * a SecHandle.  Returns NULL for a NULL or invalid handle. */
void* sspi_SecureHandleGetUpperPointer(SecHandle* handle)
{
	if (!handle || !SecIsValidHandle(handle))
		return NULL;

	return (void*) ~((size_t) handle->dwUpper);
}
/* Store a pointer (bitwise-inverted) in the upper half of a
 * SecHandle.  NULL handle is a no-op. */
void sspi_SecureHandleSetUpperPointer(SecHandle* handle, void* pointer)
{
	if (handle)
		handle->dwUpper = (ULONG_PTR) (~((size_t) pointer));
}
/* Release a heap-allocated SecHandle; NULL is a no-op. */
void sspi_SecureHandleFree(SecHandle* handle)
{
	free(handle); /* free(NULL) is well-defined */
}
/* Fill a SEC_WINNT_AUTH_IDENTITY from UTF-8 strings, converting each
 * field to UTF-16 (the UNICODE flag is set accordingly).  A NULL
 * input — or a failed conversion — leaves the corresponding field
 * NULL with length 0.  (Previously a failed ConvertToUnicode made
 * `result - 1` underflow into a huge unsigned length.) */
void sspi_SetAuthIdentity(SEC_WINNT_AUTH_IDENTITY* identity, char* user, char* domain, char* password)
{
	int status;

	identity->Flags = SEC_WINNT_AUTH_IDENTITY_UNICODE;

	identity->User = (UINT16*) NULL;
	identity->UserLength = 0;

	if (user)
	{
		/* ConvertToUnicode returns the converted length in characters
		   including the terminator, or <= 0 on failure */
		status = ConvertToUnicode(CP_UTF8, 0, user, -1, &identity->User, 0);

		if (status > 0)
			identity->UserLength = status - 1;
		else
			identity->User = (UINT16*) NULL;
	}

	identity->Domain = (UINT16*) NULL;
	identity->DomainLength = 0;

	if (domain)
	{
		status = ConvertToUnicode(CP_UTF8, 0, domain, -1, &identity->Domain, 0);

		if (status > 0)
			identity->DomainLength = status - 1;
		else
			identity->Domain = (UINT16*) NULL;
	}

	identity->Password = NULL;
	identity->PasswordLength = 0;

	if (password != NULL)
	{
		status = ConvertToUnicode(CP_UTF8, 0, password, -1, &identity->Password, 0);

		if (status > 0)
			identity->PasswordLength = status - 1;
		else
			identity->Password = NULL;
	}
}
/* Deep-copy an auth identity.  ANSI sources are converted to UNICODE
 * via sspi_SetAuthIdentity; UNICODE sources are duplicated field by
 * field.  Each allocation is checked: on failure the affected field
 * is left NULL with length 0 instead of being written through NULL. */
void sspi_CopyAuthIdentity(SEC_WINNT_AUTH_IDENTITY* identity, SEC_WINNT_AUTH_IDENTITY* srcIdentity)
{
	if (identity->Flags == SEC_WINNT_AUTH_IDENTITY_ANSI)
	{
		sspi_SetAuthIdentity(identity, (char*) srcIdentity->User,
				(char*) srcIdentity->Domain, (char*) srcIdentity->Password);
		identity->Flags = SEC_WINNT_AUTH_IDENTITY_UNICODE;
		return;
	}

	identity->Flags = SEC_WINNT_AUTH_IDENTITY_UNICODE;

	identity->User = identity->Domain = identity->Password = NULL;
	identity->UserLength = identity->DomainLength = identity->PasswordLength = 0;

	if (srcIdentity->UserLength > 0)
	{
		identity->User = (UINT16*) malloc((srcIdentity->UserLength + 1) * sizeof(WCHAR));

		if (identity->User)
		{
			CopyMemory(identity->User, srcIdentity->User, srcIdentity->UserLength * sizeof(WCHAR));
			identity->User[srcIdentity->UserLength] = 0; /* NUL-terminate */
			identity->UserLength = srcIdentity->UserLength;
		}
	}

	if (srcIdentity->DomainLength > 0)
	{
		identity->Domain = (UINT16*) malloc((srcIdentity->DomainLength + 1) * sizeof(WCHAR));

		if (identity->Domain)
		{
			CopyMemory(identity->Domain, srcIdentity->Domain, srcIdentity->DomainLength * sizeof(WCHAR));
			identity->Domain[srcIdentity->DomainLength] = 0;
			identity->DomainLength = srcIdentity->DomainLength;
		}
	}

	if (srcIdentity->PasswordLength > 0)
	{
		identity->Password = (UINT16*) malloc((srcIdentity->PasswordLength + 1) * sizeof(WCHAR));

		if (identity->Password)
		{
			CopyMemory(identity->Password, srcIdentity->Password, srcIdentity->PasswordLength * sizeof(WCHAR));
			identity->Password[srcIdentity->PasswordLength] = 0;
			identity->PasswordLength = srcIdentity->PasswordLength;
		}
	}
}
/* Return the first buffer of the requested type in a buffer
 * descriptor, or NULL if none matches. */
PSecBuffer sspi_FindSecBuffer(PSecBufferDesc pMessage, ULONG BufferType)
{
	ULONG i;

	for (i = 0; i < pMessage->cBuffers; i++)
	{
		if (pMessage->pBuffers[i].BufferType == BufferType)
			return &pMessage->pBuffers[i];
	}

	return NULL;
}
static BOOL sspi_initialized = FALSE;
/* One-time module initialization: set up OpenSSL error strings and
 * the context-buffer table.  Subsequent calls are no-ops. */
void sspi_GlobalInit()
{
	if (sspi_initialized)
		return;

	SSL_load_error_strings();
	SSL_library_init();

	sspi_ContextBufferAllocTableNew();
	sspi_initialized = TRUE;
}
/* Tear down module state created by sspi_GlobalInit(); safe to call
 * even if initialization never happened. */
void sspi_GlobalFinish()
{
	if (sspi_initialized)
		sspi_ContextBufferAllocTableFree();

	sspi_initialized = FALSE;
}
#ifndef WITH_NATIVE_SSPI
/* Look up the ANSI function table for the security package with the
 * given ANSI name; NULL if no package matches. */
SecurityFunctionTableA* sspi_GetSecurityFunctionTableAByNameA(const SEC_CHAR* Name)
{
	size_t i;
	size_t cPackages = sizeof(SecPkgInfoA_LIST) / sizeof(*(SecPkgInfoA_LIST));

	for (i = 0; i < cPackages; i++)
	{
		if (strcmp(Name, SecurityFunctionTableA_NAME_LIST[i].Name) == 0)
			return (SecurityFunctionTableA*) SecurityFunctionTableA_NAME_LIST[i].SecurityFunctionTable;
	}

	return NULL;
}
/* Stub: looking up an ANSI function table by wide-character name is
   not implemented; always returns NULL. */
SecurityFunctionTableA* sspi_GetSecurityFunctionTableAByNameW(const SEC_WCHAR* Name)
{
	return NULL;
}
SecurityFunctionTableW* sspi_GetSecurityFunctionTableWByNameW(const SEC_WCHAR* Name)
{
int index;
UINT32 cPackages;
cPackages = sizeof(SecPkgInfoW_LIST) / sizeof(*(SecPkgInfoW_LIST));
for (index = 0; index < (int) cPackages; index++)
{
if (lstrcmpW(Name, SecurityFunctionTableW_NAME_LIST[index].Name) == 0)
{
return (SecurityFunctionTableW*) SecurityFunctionTableW_NAME_LIST[index].SecurityFunctionTable;
}
}
return NULL;
}
/* Look up the wide-character function table using an ANSI name.
 * The name is converted to UTF-16 first; if the conversion fails the
 * lookup fails too (previously a NULL name was handed straight to
 * lstrcmpW). */
SecurityFunctionTableW* sspi_GetSecurityFunctionTableWByNameA(const SEC_CHAR* Name)
{
	int status;
	SEC_WCHAR* NameW = NULL;
	SecurityFunctionTableW* table;

	status = ConvertToUnicode(CP_UTF8, 0, Name, -1, &NameW, 0);

	if (status <= 0)
		return NULL;

	table = sspi_GetSecurityFunctionTableWByNameW(NameW);
	free(NameW);

	return table;
}
void FreeContextBuffer_EnumerateSecurityPackages(void* contextBuffer);
void FreeContextBuffer_QuerySecurityPackageInfo(void* contextBuffer);
/* Release a context buffer previously handed out by
 * sspi_ContextBufferAlloc(): clear its table slot, then dispatch to
 * the allocator-specific free routine.  Scanning stops at the first
 * match — after the buffer is freed its pointer value is dangling and
 * must not be compared (or freed) again. */
void sspi_ContextBufferFree(void* contextBuffer)
{
	int index;
	UINT32 allocatorIndex;

	for (index = 0; index < (int) ContextBufferAllocTable.cMaxEntries; index++)
	{
		if (contextBuffer == ContextBufferAllocTable.entries[index].contextBuffer)
		{
			allocatorIndex = ContextBufferAllocTable.entries[index].allocatorIndex;

			ContextBufferAllocTable.cEntries--;

			ContextBufferAllocTable.entries[index].allocatorIndex = 0;
			ContextBufferAllocTable.entries[index].contextBuffer = NULL;

			switch (allocatorIndex)
			{
				case EnumerateSecurityPackagesIndex:
					FreeContextBuffer_EnumerateSecurityPackages(contextBuffer);
					break;

				case QuerySecurityPackageInfoIndex:
					FreeContextBuffer_QuerySecurityPackageInfo(contextBuffer);
					break;
			}

			return;
		}
	}
}
/* Package Management */
/* Return a (context-buffer) array describing every registered
 * security package, wide-character variant.  The caller releases the
 * array with FreeContextBuffer(). */
SECURITY_STATUS SEC_ENTRY EnumerateSecurityPackagesW(ULONG* pcPackages, PSecPkgInfoW* ppPackageInfo)
{
	int index;
	size_t size;
	UINT32 cPackages;
	SecPkgInfoW* pPackageInfo;

	cPackages = sizeof(SecPkgInfoW_LIST) / sizeof(*(SecPkgInfoW_LIST));
	size = sizeof(SecPkgInfoW) * cPackages;

	pPackageInfo = (SecPkgInfoW*) sspi_ContextBufferAlloc(EnumerateSecurityPackagesIndex, size);

	if (!pPackageInfo)
		return SEC_E_INSUFFICIENT_MEMORY;

	for (index = 0; index < (int) cPackages; index++)
	{
		pPackageInfo[index].fCapabilities = SecPkgInfoW_LIST[index]->fCapabilities;
		pPackageInfo[index].wVersion = SecPkgInfoW_LIST[index]->wVersion;
		pPackageInfo[index].wRPCID = SecPkgInfoW_LIST[index]->wRPCID;
		pPackageInfo[index].cbMaxToken = SecPkgInfoW_LIST[index]->cbMaxToken;
		/* NOTE(review): _wcsdup failures are tolerated here; the free
		   routine handles NULL Name/Comment. */
		pPackageInfo[index].Name = _wcsdup(SecPkgInfoW_LIST[index]->Name);
		pPackageInfo[index].Comment = _wcsdup(SecPkgInfoW_LIST[index]->Comment);
	}

	*(pcPackages) = cPackages;
	*(ppPackageInfo) = pPackageInfo;

	return SEC_E_OK;
}
/* Return a (context-buffer) array describing every registered
 * security package, ANSI variant.  The caller releases the array with
 * FreeContextBuffer(). */
SECURITY_STATUS SEC_ENTRY EnumerateSecurityPackagesA(ULONG* pcPackages, PSecPkgInfoA* ppPackageInfo)
{
	int index;
	size_t size;
	UINT32 cPackages;
	SecPkgInfoA* pPackageInfo;

	cPackages = sizeof(SecPkgInfoA_LIST) / sizeof(*(SecPkgInfoA_LIST));
	size = sizeof(SecPkgInfoA) * cPackages;

	pPackageInfo = (SecPkgInfoA*) sspi_ContextBufferAlloc(EnumerateSecurityPackagesIndex, size);

	if (!pPackageInfo)
		return SEC_E_INSUFFICIENT_MEMORY;

	for (index = 0; index < (int) cPackages; index++)
	{
		pPackageInfo[index].fCapabilities = SecPkgInfoA_LIST[index]->fCapabilities;
		pPackageInfo[index].wVersion = SecPkgInfoA_LIST[index]->wVersion;
		pPackageInfo[index].wRPCID = SecPkgInfoA_LIST[index]->wRPCID;
		pPackageInfo[index].cbMaxToken = SecPkgInfoA_LIST[index]->cbMaxToken;
		/* NOTE(review): _strdup failures are tolerated here; the free
		   routine handles NULL Name/Comment. */
		pPackageInfo[index].Name = _strdup(SecPkgInfoA_LIST[index]->Name);
		pPackageInfo[index].Comment = _strdup(SecPkgInfoA_LIST[index]->Comment);
	}

	*(pcPackages) = cPackages;
	*(ppPackageInfo) = pPackageInfo;

	return SEC_E_OK;
}
/* Release an array produced by EnumerateSecurityPackagesA/W,
 * including the duplicated Name/Comment strings. */
void FreeContextBuffer_EnumerateSecurityPackages(void* contextBuffer)
{
	size_t i;
	size_t cPackages = sizeof(SecPkgInfoA_LIST) / sizeof(*(SecPkgInfoA_LIST));
	SecPkgInfoA* pPackageInfo = (SecPkgInfoA*) contextBuffer;

	for (i = 0; i < cPackages; i++)
	{
		/* free(NULL) is well-defined, so no guards are needed */
		free(pPackageInfo[i].Name);
		free(pPackageInfo[i].Comment);
	}

	free(pPackageInfo);
}
/* Return the module-wide wide-character SSPI dispatch table. */
SecurityFunctionTableW* SEC_ENTRY InitSecurityInterfaceW(void)
{
	return &SSPI_SecurityFunctionTableW;
}
/* Return the module-wide ANSI SSPI dispatch table. */
SecurityFunctionTableA* SEC_ENTRY InitSecurityInterfaceA(void)
{
	return &SSPI_SecurityFunctionTableA;
}
/* Return a context-buffer copy of the SecPkgInfoW for the named
 * package, or SEC_E_SECPKG_NOT_FOUND.  The caller releases it with
 * FreeContextBuffer(). */
SECURITY_STATUS SEC_ENTRY QuerySecurityPackageInfoW(SEC_WCHAR* pszPackageName, PSecPkgInfoW* ppPackageInfo)
{
	int index;
	size_t size;
	UINT32 cPackages;
	SecPkgInfoW* pPackageInfo;

	cPackages = sizeof(SecPkgInfoW_LIST) / sizeof(*(SecPkgInfoW_LIST));

	for (index = 0; index < (int) cPackages; index++)
	{
		if (lstrcmpW(pszPackageName, SecPkgInfoW_LIST[index]->Name) == 0)
		{
			size = sizeof(SecPkgInfoW);
			pPackageInfo = (SecPkgInfoW*) sspi_ContextBufferAlloc(QuerySecurityPackageInfoIndex, size);

			if (!pPackageInfo)
				return SEC_E_INSUFFICIENT_MEMORY;

			pPackageInfo->fCapabilities = SecPkgInfoW_LIST[index]->fCapabilities;
			pPackageInfo->wVersion = SecPkgInfoW_LIST[index]->wVersion;
			pPackageInfo->wRPCID = SecPkgInfoW_LIST[index]->wRPCID;
			pPackageInfo->cbMaxToken = SecPkgInfoW_LIST[index]->cbMaxToken;
			pPackageInfo->Name = _wcsdup(SecPkgInfoW_LIST[index]->Name);
			pPackageInfo->Comment = _wcsdup(SecPkgInfoW_LIST[index]->Comment);

			*(ppPackageInfo) = pPackageInfo;

			return SEC_E_OK;
		}
	}

	*(ppPackageInfo) = NULL;

	return SEC_E_SECPKG_NOT_FOUND;
}
/* Return a context-buffer copy of the SecPkgInfoA for the named
 * package, or SEC_E_SECPKG_NOT_FOUND.  The caller releases it with
 * FreeContextBuffer(). */
SECURITY_STATUS SEC_ENTRY QuerySecurityPackageInfoA(SEC_CHAR* pszPackageName, PSecPkgInfoA* ppPackageInfo)
{
	int index;
	size_t size;
	UINT32 cPackages;
	SecPkgInfoA* pPackageInfo;

	cPackages = sizeof(SecPkgInfoA_LIST) / sizeof(*(SecPkgInfoA_LIST));

	for (index = 0; index < (int) cPackages; index++)
	{
		if (strcmp(pszPackageName, SecPkgInfoA_LIST[index]->Name) == 0)
		{
			size = sizeof(SecPkgInfoA);
			pPackageInfo = (SecPkgInfoA*) sspi_ContextBufferAlloc(QuerySecurityPackageInfoIndex, size);

			if (!pPackageInfo)
				return SEC_E_INSUFFICIENT_MEMORY;

			pPackageInfo->fCapabilities = SecPkgInfoA_LIST[index]->fCapabilities;
			pPackageInfo->wVersion = SecPkgInfoA_LIST[index]->wVersion;
			pPackageInfo->wRPCID = SecPkgInfoA_LIST[index]->wRPCID;
			pPackageInfo->cbMaxToken = SecPkgInfoA_LIST[index]->cbMaxToken;
			pPackageInfo->Name = _strdup(SecPkgInfoA_LIST[index]->Name);
			pPackageInfo->Comment = _strdup(SecPkgInfoA_LIST[index]->Comment);

			*(ppPackageInfo) = pPackageInfo;

			return SEC_E_OK;
		}
	}

	*(ppPackageInfo) = NULL;

	return SEC_E_SECPKG_NOT_FOUND;
}
/* Release a single package-info record produced by
 * QuerySecurityPackageInfoA/W, including its duplicated strings. */
void FreeContextBuffer_QuerySecurityPackageInfo(void* contextBuffer)
{
	SecPkgInfo* pPackageInfo = (SecPkgInfo*) contextBuffer;

	/* free(NULL) is well-defined, so no guards are needed */
	free(pPackageInfo->Name);
	free(pPackageInfo->Comment);
	free(pPackageInfo);
}
/* Credential Management */
/* Dispatch AcquireCredentialsHandleW to the security package named
 * by pszPackage. */
SECURITY_STATUS SEC_ENTRY AcquireCredentialsHandleW(SEC_WCHAR* pszPrincipal, SEC_WCHAR* pszPackage,
		ULONG fCredentialUse, void* pvLogonID, void* pAuthData, SEC_GET_KEY_FN pGetKeyFn,
		void* pvGetKeyArgument, PCredHandle phCredential, PTimeStamp ptsExpiry)
{
	SecurityFunctionTableW* table = sspi_GetSecurityFunctionTableWByNameW(pszPackage);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->AcquireCredentialsHandleW)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->AcquireCredentialsHandleW(pszPrincipal, pszPackage, fCredentialUse,
			pvLogonID, pAuthData, pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry);
}
/* Dispatch AcquireCredentialsHandleA to the security package named
 * by pszPackage. */
SECURITY_STATUS SEC_ENTRY AcquireCredentialsHandleA(SEC_CHAR* pszPrincipal, SEC_CHAR* pszPackage,
		ULONG fCredentialUse, void* pvLogonID, void* pAuthData, SEC_GET_KEY_FN pGetKeyFn,
		void* pvGetKeyArgument, PCredHandle phCredential, PTimeStamp ptsExpiry)
{
	SecurityFunctionTableA* table = sspi_GetSecurityFunctionTableAByNameA(pszPackage);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->AcquireCredentialsHandleA)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->AcquireCredentialsHandleA(pszPrincipal, pszPackage, fCredentialUse,
			pvLogonID, pAuthData, pGetKeyFn, pvGetKeyArgument, phCredential, ptsExpiry);
}
/* Stub: exporting a security context is not implemented; reports
   success without doing anything. */
SECURITY_STATUS SEC_ENTRY ExportSecurityContext(PCtxtHandle phContext, ULONG fFlags, PSecBuffer pPackedContext, HANDLE* pToken)
{
	return SEC_E_OK;
}
/* Dispatch FreeCredentialsHandle to the package that issued the
 * credential (its name is stashed in the handle's upper pointer). */
SECURITY_STATUS SEC_ENTRY FreeCredentialsHandle(PCredHandle phCredential)
{
	SecurityFunctionTableA* table;
	char* name = (char*) sspi_SecureHandleGetUpperPointer(phCredential);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableAByNameA(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->FreeCredentialsHandle)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->FreeCredentialsHandle(phCredential);
}
/* Stub: importing a security context is not implemented; reports
   success without doing anything. */
SECURITY_STATUS SEC_ENTRY ImportSecurityContextW(SEC_WCHAR* pszPackage, PSecBuffer pPackedContext, HANDLE pToken, PCtxtHandle phContext)
{
	return SEC_E_OK;
}
/* Stub: importing a security context (ANSI) is not implemented;
   reports success without doing anything. */
SECURITY_STATUS SEC_ENTRY ImportSecurityContextA(SEC_CHAR* pszPackage, PSecBuffer pPackedContext, HANDLE pToken, PCtxtHandle phContext)
{
	return SEC_E_OK;
}
/* Dispatch QueryCredentialsAttributesW to the package that issued
 * the credential (wide name stored in the handle's upper pointer). */
SECURITY_STATUS SEC_ENTRY QueryCredentialsAttributesW(PCredHandle phCredential, ULONG ulAttribute, void* pBuffer)
{
	SecurityFunctionTableW* table;
	SEC_WCHAR* name = (SEC_WCHAR*) sspi_SecureHandleGetUpperPointer(phCredential);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableWByNameW(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->QueryCredentialsAttributesW)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->QueryCredentialsAttributesW(phCredential, ulAttribute, pBuffer);
}
/* Dispatch QueryCredentialsAttributesA to the package that issued
 * the credential. */
SECURITY_STATUS SEC_ENTRY QueryCredentialsAttributesA(PCredHandle phCredential, ULONG ulAttribute, void* pBuffer)
{
	SecurityFunctionTableA* table;
	char* name = (char*) sspi_SecureHandleGetUpperPointer(phCredential);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableAByNameA(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->QueryCredentialsAttributesA)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->QueryCredentialsAttributesA(phCredential, ulAttribute, pBuffer);
}
/* Context Management */
/* Dispatch AcceptSecurityContext to the package that issued the
 * credential. */
SECURITY_STATUS SEC_ENTRY AcceptSecurityContext(PCredHandle phCredential, PCtxtHandle phContext,
		PSecBufferDesc pInput, ULONG fContextReq, ULONG TargetDataRep, PCtxtHandle phNewContext,
		PSecBufferDesc pOutput, PULONG pfContextAttr, PTimeStamp ptsTimeStamp)
{
	SecurityFunctionTableA* table;
	char* name = (char*) sspi_SecureHandleGetUpperPointer(phCredential);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableAByNameA(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->AcceptSecurityContext)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->AcceptSecurityContext(phCredential, phContext, pInput, fContextReq,
			TargetDataRep, phNewContext, pOutput, pfContextAttr, ptsTimeStamp);
}
/* Stub: applying a control token is not implemented; reports success
   without doing anything. */
SECURITY_STATUS SEC_ENTRY ApplyControlToken(PCtxtHandle phContext, PSecBufferDesc pInput)
{
	return SEC_E_OK;
}
/* Stub: completing an auth token is not implemented; reports success
   without doing anything. */
SECURITY_STATUS SEC_ENTRY CompleteAuthToken(PCtxtHandle phContext, PSecBufferDesc pToken)
{
	return SEC_E_OK;
}
/* Dispatch DeleteSecurityContext to the package that owns the
 * context. */
SECURITY_STATUS SEC_ENTRY DeleteSecurityContext(PCtxtHandle phContext)
{
	SecurityFunctionTableA* table;
	char* name = (char*) sspi_SecureHandleGetUpperPointer(phContext);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableAByNameA(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->DeleteSecurityContext)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->DeleteSecurityContext(phContext);
}
/* Release a buffer handed out by this module's enumeration/query
 * calls.  NULL is rejected as an invalid handle. */
SECURITY_STATUS SEC_ENTRY FreeContextBuffer(void* pvContextBuffer)
{
	if (!pvContextBuffer)
		return SEC_E_INVALID_HANDLE;

	sspi_ContextBufferFree(pvContextBuffer);

	return SEC_E_OK;
}
/* Stub: impersonation is not implemented; reports success without
   doing anything. */
SECURITY_STATUS SEC_ENTRY ImpersonateSecurityContext(PCtxtHandle phContext)
{
	return SEC_E_OK;
}
/* Dispatch InitializeSecurityContextW to the package that issued the
 * credential.  The package name stored in the handle is ANSI, so the
 * wide table is located via the A-name lookup. */
SECURITY_STATUS SEC_ENTRY InitializeSecurityContextW(PCredHandle phCredential, PCtxtHandle phContext,
		SEC_WCHAR* pszTargetName, ULONG fContextReq, ULONG Reserved1, ULONG TargetDataRep,
		PSecBufferDesc pInput, ULONG Reserved2, PCtxtHandle phNewContext,
		PSecBufferDesc pOutput, PULONG pfContextAttr, PTimeStamp ptsExpiry)
{
	SecurityFunctionTableW* table;
	SEC_CHAR* name = (SEC_CHAR*) sspi_SecureHandleGetUpperPointer(phCredential);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableWByNameA(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->InitializeSecurityContextW)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->InitializeSecurityContextW(phCredential, phContext,
			pszTargetName, fContextReq, Reserved1, TargetDataRep,
			pInput, Reserved2, phNewContext, pOutput, pfContextAttr, ptsExpiry);
}
/* Dispatch InitializeSecurityContextA to the package that issued the
 * credential. */
SECURITY_STATUS SEC_ENTRY InitializeSecurityContextA(PCredHandle phCredential, PCtxtHandle phContext,
		SEC_CHAR* pszTargetName, ULONG fContextReq, ULONG Reserved1, ULONG TargetDataRep,
		PSecBufferDesc pInput, ULONG Reserved2, PCtxtHandle phNewContext,
		PSecBufferDesc pOutput, PULONG pfContextAttr, PTimeStamp ptsExpiry)
{
	SecurityFunctionTableA* table;
	SEC_CHAR* name = (SEC_CHAR*) sspi_SecureHandleGetUpperPointer(phCredential);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableAByNameA(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->InitializeSecurityContextA)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->InitializeSecurityContextA(phCredential, phContext,
			pszTargetName, fContextReq, Reserved1, TargetDataRep,
			pInput, Reserved2, phNewContext, pOutput, pfContextAttr, ptsExpiry);
}
/* Dispatch QueryContextAttributesW to the package that owns the
 * context (ANSI package name, wide table via the A-name lookup). */
SECURITY_STATUS SEC_ENTRY QueryContextAttributesW(PCtxtHandle phContext, ULONG ulAttribute, void* pBuffer)
{
	SecurityFunctionTableW* table;
	SEC_CHAR* name = (SEC_CHAR*) sspi_SecureHandleGetUpperPointer(phContext);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableWByNameA(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->QueryContextAttributesW)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->QueryContextAttributesW(phContext, ulAttribute, pBuffer);
}
/* Dispatch QueryContextAttributesA to the package that owns the
 * context. */
SECURITY_STATUS SEC_ENTRY QueryContextAttributesA(PCtxtHandle phContext, ULONG ulAttribute, void* pBuffer)
{
	SecurityFunctionTableA* table;
	SEC_CHAR* name = (SEC_CHAR*) sspi_SecureHandleGetUpperPointer(phContext);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableAByNameA(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->QueryContextAttributesA)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->QueryContextAttributesA(phContext, ulAttribute, pBuffer);
}
/* Stub: querying the context token is not implemented; reports
   success without doing anything. */
SECURITY_STATUS SEC_ENTRY QuerySecurityContextToken(PCtxtHandle phContext, HANDLE* phToken)
{
	return SEC_E_OK;
}
/* Stub: setting context attributes is not implemented; reports
   success without doing anything. */
SECURITY_STATUS SEC_ENTRY SetContextAttributes(PCtxtHandle phContext, ULONG ulAttribute, void* pBuffer, ULONG cbBuffer)
{
	return SEC_E_OK;
}
/* Stub: reverting impersonation is not implemented; reports success
   without doing anything. */
SECURITY_STATUS SEC_ENTRY RevertSecurityContext(PCtxtHandle phContext)
{
	return SEC_E_OK;
}
/* Message Support */
/* Dispatch DecryptMessage to the package that owns the context. */
SECURITY_STATUS SEC_ENTRY DecryptMessage(PCtxtHandle phContext, PSecBufferDesc pMessage, ULONG MessageSeqNo, PULONG pfQOP)
{
	SecurityFunctionTableA* table;
	char* name = (char*) sspi_SecureHandleGetUpperPointer(phContext);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableAByNameA(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->DecryptMessage)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->DecryptMessage(phContext, pMessage, MessageSeqNo, pfQOP);
}
/* Dispatch EncryptMessage to the package that owns the context. */
SECURITY_STATUS SEC_ENTRY EncryptMessage(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo)
{
	SecurityFunctionTableA* table;
	char* name = (char*) sspi_SecureHandleGetUpperPointer(phContext);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableAByNameA(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->EncryptMessage)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->EncryptMessage(phContext, fQOP, pMessage, MessageSeqNo);
}
/* Dispatch MakeSignature to the package that owns the context. */
SECURITY_STATUS SEC_ENTRY MakeSignature(PCtxtHandle phContext, ULONG fQOP, PSecBufferDesc pMessage, ULONG MessageSeqNo)
{
	SecurityFunctionTableA* table;
	char* name = (char*) sspi_SecureHandleGetUpperPointer(phContext);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableAByNameA(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->MakeSignature)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->MakeSignature(phContext, fQOP, pMessage, MessageSeqNo);
}
/* Dispatch VerifySignature to the package that owns the context. */
SECURITY_STATUS SEC_ENTRY VerifySignature(PCtxtHandle phContext, PSecBufferDesc pMessage, ULONG MessageSeqNo, PULONG pfQOP)
{
	SecurityFunctionTableA* table;
	char* name = (char*) sspi_SecureHandleGetUpperPointer(phContext);

	if (!name)
		return SEC_E_SECPKG_NOT_FOUND;

	table = sspi_GetSecurityFunctionTableAByNameA(name);

	if (!table)
		return SEC_E_SECPKG_NOT_FOUND;

	if (!table->VerifySignature)
		return SEC_E_UNSUPPORTED_FUNCTION;

	return table->VerifySignature(phContext, pMessage, MessageSeqNo, pfQOP);
}
/* Module-wide ANSI SSPI dispatch table, returned by
   InitSecurityInterfaceA().  NULL slots are reserved/unimplemented. */
SecurityFunctionTableA SSPI_SecurityFunctionTableA =
{
	1, /* dwVersion */
	EnumerateSecurityPackagesA, /* EnumerateSecurityPackages */
	QueryCredentialsAttributesA, /* QueryCredentialsAttributes */
	AcquireCredentialsHandleA, /* AcquireCredentialsHandle */
	FreeCredentialsHandle, /* FreeCredentialsHandle */
	NULL, /* Reserved2 */
	InitializeSecurityContextA, /* InitializeSecurityContext */
	AcceptSecurityContext, /* AcceptSecurityContext */
	CompleteAuthToken, /* CompleteAuthToken */
	DeleteSecurityContext, /* DeleteSecurityContext */
	ApplyControlToken, /* ApplyControlToken */
	QueryContextAttributesA, /* QueryContextAttributes */
	ImpersonateSecurityContext, /* ImpersonateSecurityContext */
	RevertSecurityContext, /* RevertSecurityContext */
	MakeSignature, /* MakeSignature */
	VerifySignature, /* VerifySignature */
	FreeContextBuffer, /* FreeContextBuffer */
	QuerySecurityPackageInfoA, /* QuerySecurityPackageInfo */
	NULL, /* Reserved3 */
	NULL, /* Reserved4 */
	ExportSecurityContext, /* ExportSecurityContext */
	ImportSecurityContextA, /* ImportSecurityContext */
	NULL, /* AddCredentials */
	NULL, /* Reserved8 */
	QuerySecurityContextToken, /* QuerySecurityContextToken */
	EncryptMessage, /* EncryptMessage */
	DecryptMessage, /* DecryptMessage */
	SetContextAttributes, /* SetContextAttributes */
};
/* Module-wide wide-character SSPI dispatch table, returned by
   InitSecurityInterfaceW().  NULL slots are reserved/unimplemented. */
SecurityFunctionTableW SSPI_SecurityFunctionTableW =
{
	1, /* dwVersion */
	EnumerateSecurityPackagesW, /* EnumerateSecurityPackages */
	QueryCredentialsAttributesW, /* QueryCredentialsAttributes */
	AcquireCredentialsHandleW, /* AcquireCredentialsHandle */
	FreeCredentialsHandle, /* FreeCredentialsHandle */
	NULL, /* Reserved2 */
	InitializeSecurityContextW, /* InitializeSecurityContext */
	AcceptSecurityContext, /* AcceptSecurityContext */
	CompleteAuthToken, /* CompleteAuthToken */
	DeleteSecurityContext, /* DeleteSecurityContext */
	ApplyControlToken, /* ApplyControlToken */
	QueryContextAttributesW, /* QueryContextAttributes */
	ImpersonateSecurityContext, /* ImpersonateSecurityContext */
	RevertSecurityContext, /* RevertSecurityContext */
	MakeSignature, /* MakeSignature */
	VerifySignature, /* VerifySignature */
	FreeContextBuffer, /* FreeContextBuffer */
	QuerySecurityPackageInfoW, /* QuerySecurityPackageInfo */
	NULL, /* Reserved3 */
	NULL, /* Reserved4 */
	ExportSecurityContext, /* ExportSecurityContext */
	ImportSecurityContextW, /* ImportSecurityContext */
	NULL, /* AddCredentials */
	NULL, /* Reserved8 */
	QuerySecurityContextToken, /* QuerySecurityContextToken */
	EncryptMessage, /* EncryptMessage */
	DecryptMessage, /* DecryptMessage */
	SetContextAttributes, /* SetContextAttributes */
};
#endif
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_5715_3 |
crossvul-cpp_data_bad_4809_0 | /*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "common.h"
#include "git2/types.h"
#include "git2/errors.h"
#include "git2/refs.h"
#include "git2/revwalk.h"
#include "smart.h"
#include "util.h"
#include "netops.h"
#include "posix.h"
#include "buffer.h"

#include <ctype.h>
#include <string.h>
#define PKT_LEN_SIZE 4
static const char pkt_done_str[] = "0009done\n";
static const char pkt_flush_str[] = "0000";
static const char pkt_have_prefix[] = "0032have ";
static const char pkt_want_prefix[] = "0032want ";
/* Build a bare FLUSH packet ("0000"); it carries no payload. */
static int flush_pkt(git_pkt **out)
{
	git_pkt *p = git__malloc(sizeof(git_pkt));
	GITERR_CHECK_ALLOC(p);

	p->type = GIT_PKT_FLUSH;
	*out = p;

	return 0;
}
/*
 * Parse an ACK packet: "ACK <oid> [continue|common|ready]".
 * The trailing status word is used by multi_ack / multi_ack_detailed.
 *
 * NOTE(review): `line` is not NUL-terminated; the length checks below do
 * not fully bound the git__prefixcmp reads — verify against upstream
 * hardening before trusting untrusted input here.
 */
static int ack_pkt(git_pkt **out, const char *line, size_t len)
{
	git_pkt_ack *pkt;

	GIT_UNUSED(line);
	GIT_UNUSED(len);

	pkt = git__calloc(1, sizeof(git_pkt_ack));
	GITERR_CHECK_ALLOC(pkt);
	pkt->type = GIT_PKT_ACK;

	/* Skip the "ACK" keyword the dispatcher already matched. */
	line += 3;
	len -= 3;

	/* Optional object id after the separator. */
	if (len >= GIT_OID_HEXSZ) {
		git_oid_fromstr(&pkt->oid, line + 1);
		line += GIT_OID_HEXSZ + 1;
		len -= GIT_OID_HEXSZ + 1;
	}

	/* Optional multi-ack status word. */
	if (len >= 7) {
		if (!git__prefixcmp(line + 1, "continue"))
			pkt->status = GIT_ACK_CONTINUE;
		if (!git__prefixcmp(line + 1, "common"))
			pkt->status = GIT_ACK_COMMON;
		if (!git__prefixcmp(line + 1, "ready"))
			pkt->status = GIT_ACK_READY;
	}

	*out = (git_pkt *) pkt;

	return 0;
}
/* Build a NAK packet; like FLUSH it has no payload of its own. */
static int nak_pkt(git_pkt **out)
{
	git_pkt *p = git__malloc(sizeof(git_pkt));
	GITERR_CHECK_ALLOC(p);

	p->type = GIT_PKT_NAK;
	*out = p;

	return 0;
}
/* Build a PACK marker packet: the packfile stream starts here. */
static int pack_pkt(git_pkt **out)
{
	git_pkt *p = git__malloc(sizeof(git_pkt));
	GITERR_CHECK_ALLOC(p);

	p->type = GIT_PKT_PACK;
	*out = p;

	return 0;
}
/*
 * Parse a comment packet ("#...") by copying the whole line verbatim into
 * a NUL-terminated git_pkt_comment.  The two CHECK_ALLOC_ADDs guard the
 * struct-plus-payload-plus-NUL size computation against overflow.
 */
static int comment_pkt(git_pkt **out, const char *line, size_t len)
{
	git_pkt_comment *pkt;
	size_t alloclen;

	GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_comment), len);
	GITERR_CHECK_ALLOC_ADD(&alloclen, alloclen, 1);
	pkt = git__malloc(alloclen);
	GITERR_CHECK_ALLOC(pkt);

	pkt->type = GIT_PKT_COMMENT;
	memcpy(pkt->comment, line, len);
	pkt->comment[len] = '\0';

	*out = (git_pkt *) pkt;

	return 0;
}
/*
 * Parse an "ERR <message>" packet into a NUL-terminated git_pkt_err.
 * Fixes: (1) `len -= 4` could underflow size_t for a short line;
 * (2) the allocation was sized with sizeof(git_pkt_progress) instead of
 * sizeof(git_pkt_err) — the correct struct for the data being stored.
 */
static int err_pkt(git_pkt **out, const char *line, size_t len)
{
	git_pkt_err *pkt;
	size_t alloclen;

	/* Remove "ERR " from the line; refuse lines too short to hold it */
	if (len < 4) {
		giterr_set(GITERR_NET, "Error parsing ERR pkt-line");
		return -1;
	}
	line += 4;
	len -= 4;

	GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_err), len);
	GITERR_CHECK_ALLOC_ADD(&alloclen, alloclen, 1);
	pkt = git__malloc(alloclen);
	GITERR_CHECK_ALLOC(pkt);
	pkt->type = GIT_PKT_ERR;
	pkt->len = (int)len;

	memcpy(pkt->error, line, len);
	pkt->error[len] = '\0';

	*out = (git_pkt *) pkt;

	return 0;
}
/*
 * Parse a sideband DATA packet: skip the band byte, keep the payload.
 * Fixes: (1) `len--` could underflow if a zero-length payload ever reached
 * here; (2) the allocation was sized with sizeof(git_pkt_progress) instead
 * of sizeof(git_pkt_data), the struct actually being populated.
 */
static int data_pkt(git_pkt **out, const char *line, size_t len)
{
	git_pkt_data *pkt;
	size_t alloclen;

	/* The caller dispatched on *line, so len should be >= 1; be defensive */
	if (len < 1) {
		giterr_set(GITERR_NET, "Error parsing data pkt-line");
		return -1;
	}
	line++;
	len--;

	GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_data), len);
	pkt = git__malloc(alloclen);
	GITERR_CHECK_ALLOC(pkt);
	pkt->type = GIT_PKT_DATA;
	pkt->len = (int) len;
	memcpy(pkt->data, line, len);

	*out = (git_pkt *) pkt;

	return 0;
}
/*
 * Parse a sideband PROGRESS packet: skip the band byte, keep the text.
 * Fix: guard against `len--` underflowing size_t on a zero-length payload.
 */
static int sideband_progress_pkt(git_pkt **out, const char *line, size_t len)
{
	git_pkt_progress *pkt;
	size_t alloclen;

	/* The caller dispatched on *line, so len should be >= 1; be defensive */
	if (len < 1) {
		giterr_set(GITERR_NET, "Error parsing progress pkt-line");
		return -1;
	}
	line++;
	len--;

	GITERR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_progress), len);
	pkt = git__malloc(alloclen);
	GITERR_CHECK_ALLOC(pkt);
	pkt->type = GIT_PKT_PROGRESS;
	pkt->len = (int) len;
	memcpy(pkt->data, line, len);

	*out = (git_pkt *) pkt;

	return 0;
}
/*
 * Parse a sideband ERROR packet: skip the band byte, keep the message
 * NUL-terminated.  Fix: guard against `len--` underflowing size_t.
 */
static int sideband_error_pkt(git_pkt **out, const char *line, size_t len)
{
	git_pkt_err *pkt;
	size_t alloc_len;

	/* The caller dispatched on *line, so len should be >= 1; be defensive */
	if (len < 1) {
		giterr_set(GITERR_NET, "Error parsing sideband error pkt-line");
		return -1;
	}
	line++;
	len--;

	GITERR_CHECK_ALLOC_ADD(&alloc_len, sizeof(git_pkt_err), len);
	GITERR_CHECK_ALLOC_ADD(&alloc_len, alloc_len, 1);
	pkt = git__malloc(alloc_len);
	GITERR_CHECK_ALLOC(pkt);
	pkt->type = GIT_PKT_ERR;
	pkt->len = (int)len;
	memcpy(pkt->error, line, len);
	pkt->error[len] = '\0';

	*out = (git_pkt *)pkt;

	return 0;
}
/*
 * Parse an other-ref line: "<oid> <refname>[\n][\0<capabilities>]".
 * Fixes: (1) the OID was parsed without first checking that `len` is at
 * least GIT_OID_HEXSZ + 1, allowing an out-of-bounds read of `line`;
 * (2) `line[len - 1]` was read even when the remaining name length was 0,
 * which indexes before the buffer.
 */
static int ref_pkt(git_pkt **out, const char *line, size_t len)
{
	int error;
	git_pkt_ref *pkt;
	size_t alloclen;

	pkt = git__malloc(sizeof(git_pkt_ref));
	GITERR_CHECK_ALLOC(pkt);
	memset(pkt, 0x0, sizeof(git_pkt_ref));
	pkt->type = GIT_PKT_REF;

	/* A full hex OID plus the separating space must fit in `len` */
	if (len < GIT_OID_HEXSZ + 1) {
		giterr_set(GITERR_NET, "Error parsing pkt-line");
		error = -1;
		goto error_out;
	}

	if ((error = git_oid_fromstr(&pkt->head.oid, line)) < 0)
		goto error_out;

	/* Check for a bit of consistency */
	if (line[GIT_OID_HEXSZ] != ' ') {
		giterr_set(GITERR_NET, "Error parsing pkt-line");
		error = -1;
		goto error_out;
	}

	/* Jump from the name */
	line += GIT_OID_HEXSZ + 1;
	len -= (GIT_OID_HEXSZ + 1);

	/* Only strip a trailing newline when there is a name to strip from */
	if (len > 0 && line[len - 1] == '\n')
		--len;

	GITERR_CHECK_ALLOC_ADD(&alloclen, len, 1);
	pkt->head.name = git__malloc(alloclen);
	GITERR_CHECK_ALLOC(pkt->head.name);

	memcpy(pkt->head.name, line, len);
	pkt->head.name[len] = '\0';

	/* A NUL inside the name separates it from the capability list */
	if (strlen(pkt->head.name) < len) {
		pkt->capabilities = strchr(pkt->head.name, '\0') + 1;
	}

	*out = (git_pkt *)pkt;
	return 0;

error_out:
	git__free(pkt);
	return error;
}
/*
 * Parse an "ok <refname>\n" report-status packet.
 * Fixes: (1) skipping "ok " could underflow an (implicit) remaining
 * length for a short line; (2) strchr() was used on a buffer that is not
 * NUL-terminated — replaced with a length-bounded memchr().
 */
static int ok_pkt(git_pkt **out, const char *line, size_t len)
{
	git_pkt_ok *pkt;
	const char *ptr;
	size_t alloc_len;

	pkt = git__malloc(sizeof(*pkt));
	GITERR_CHECK_ALLOC(pkt);
	pkt->type = GIT_PKT_OK;

	/* skip "ok " without underflowing */
	if (len < 3)
		goto out_err;
	line += 3;
	len -= 3;

	/* Bounded search: the buffer may not contain a terminator */
	if (!(ptr = memchr(line, '\n', len)))
		goto out_err;
	len = ptr - line;

	GITERR_CHECK_ALLOC_ADD(&alloc_len, len, 1);
	pkt->ref = git__malloc(alloc_len);
	GITERR_CHECK_ALLOC(pkt->ref);

	memcpy(pkt->ref, line, len);
	pkt->ref[len] = '\0';

	*out = (git_pkt *)pkt;
	return 0;

out_err:
	giterr_set(GITERR_NET, "Invalid packet line");
	git__free(pkt);
	return -1;
}
/*
 * Parse an "ng <refname> <message>\n" report-status packet.
 * Fixes: (1) skipping "ng " could underflow for a short line; (2) both
 * strchr() calls scanned a buffer that is not NUL-terminated — replaced
 * with memchr() bounded by the bytes actually remaining in the packet.
 */
static int ng_pkt(git_pkt **out, const char *line, size_t len)
{
	git_pkt_ng *pkt;
	const char *ptr, *end;
	size_t alloclen;

	pkt = git__malloc(sizeof(*pkt));
	GITERR_CHECK_ALLOC(pkt);
	pkt->ref = NULL;
	pkt->type = GIT_PKT_NG;

	end = line + len;  /* one past the last readable byte */

	/* skip "ng " without underflowing */
	if (len < 3)
		goto out_err;
	line += 3;

	/* refname runs up to the next space */
	if (!(ptr = memchr(line, ' ', end - line)))
		goto out_err;
	len = ptr - line;

	GITERR_CHECK_ALLOC_ADD(&alloclen, len, 1);
	pkt->ref = git__malloc(alloclen);
	GITERR_CHECK_ALLOC(pkt->ref);

	memcpy(pkt->ref, line, len);
	pkt->ref[len] = '\0';

	/* the rest, up to the newline, is the error message */
	line = ptr + 1;
	if (!(ptr = memchr(line, '\n', end - line)))
		goto out_err;
	len = ptr - line;

	GITERR_CHECK_ALLOC_ADD(&alloclen, len, 1);
	pkt->msg = git__malloc(alloclen);
	GITERR_CHECK_ALLOC(pkt->msg);

	memcpy(pkt->msg, line, len);
	pkt->msg[len] = '\0';

	*out = (git_pkt *)pkt;
	return 0;

out_err:
	giterr_set(GITERR_NET, "Invalid packet line");
	git__free(pkt->ref);
	git__free(pkt);
	return -1;
}
/*
 * Parse an "unpack ok" / "unpack <error>" report-status packet.
 * Fix: git__prefixcmp() read up to 9 bytes from a buffer that is neither
 * NUL-terminated nor guaranteed to be that long; use a length-bounded
 * memcmp() instead.
 */
static int unpack_pkt(git_pkt **out, const char *line, size_t len)
{
	git_pkt_unpack *pkt;

	pkt = git__malloc(sizeof(*pkt));
	GITERR_CHECK_ALLOC(pkt);
	pkt->type = GIT_PKT_UNPACK;

	if (len >= 9 && !memcmp(line, "unpack ok", 9))
		pkt->unpack_ok = 1;
	else
		pkt->unpack_ok = 0;

	*out = (git_pkt *)pkt;
	return 0;
}
/*
 * Decode the 4-hex-digit pkt-line length prefix.
 *
 * NOTE(review): this reads exactly PKT_LEN_SIZE bytes from `line`; the
 * caller must guarantee that many readable bytes (git_pkt_parse_line only
 * enforces this when it was given a non-zero bufflen) — verify callers.
 *
 * Returns the decoded length (>= 0) or a negative error on bad input.
 */
static int32_t parse_len(const char *line)
{
	char num[PKT_LEN_SIZE + 1];
	int i, k, error;
	int32_t len;
	const char *num_end;

	/* Work on a NUL-terminated copy of the four digit characters */
	memcpy(num, line, PKT_LEN_SIZE);
	num[PKT_LEN_SIZE] = '\0';

	for (i = 0; i < PKT_LEN_SIZE; ++i) {
		if (!isxdigit(num[i])) {
			/* Make sure there are no special characters before passing to error message */
			for (k = 0; k < PKT_LEN_SIZE; ++k) {
				if(!isprint(num[k])) {
					num[k] = '.';
				}
			}

			giterr_set(GITERR_NET, "invalid hex digit in length: '%s'", num);
			return -1;
		}
	}

	if ((error = git__strtol32(&len, num, &num_end, 16)) < 0)
		return error;

	return len;
}
/*
 * As per the documentation, the syntax is:
 *
 * pkt-line	= data-pkt / flush-pkt
 * data-pkt	= pkt-len pkt-payload
 * pkt-len		= 4*(HEXDIG)
 * pkt-payload = (pkt-len -4)*(OCTET)
 * flush-pkt	= "0000"
 *
 * Which means that the first four bytes are the length of the line,
 * in ASCII hexadecimal (including itself)
 */

/*
 * Parse one pkt-line from `line` into a freshly-allocated packet in
 * `*head`, advancing `*out` past the bytes consumed.  `bufflen` is the
 * number of valid bytes in `line`; returns GIT_EBUFS when more input is
 * needed, 0 on success, or a negative error.
 *
 * NOTE(review): when bufflen == 0 no bounds checking is possible at this
 * level and the per-type parsers must fend for themselves — confirm all
 * callers pass a real buffer length for untrusted input.
 */
int git_pkt_parse_line(
	git_pkt **head, const char *line, const char **out, size_t bufflen)
{
	int ret;
	int32_t len;

	/* Not even enough for the length */
	if (bufflen > 0 && bufflen < PKT_LEN_SIZE)
		return GIT_EBUFS;

	len = parse_len(line);
	if (len < 0) {
		/*
		 * If we fail to parse the length, it might be because the
		 * server is trying to send us the packfile already.
		 */
		if (bufflen >= 4 && !git__prefixcmp(line, "PACK")) {
			giterr_clear();
			*out = line;
			return pack_pkt(head);
		}

		return (int)len;
	}

	/*
	 * If we were given a buffer length, then make sure there is
	 * enough in the buffer to satisfy this line
	 */
	if (bufflen > 0 && bufflen < (size_t)len)
		return GIT_EBUFS;

	/*
	 * The length has to be exactly 0 in case of a flush
	 * packet or greater than PKT_LEN_SIZE, as the decoded
	 * length includes its own encoded length of four bytes.
	 */
	if (len != 0 && len < PKT_LEN_SIZE)
		return GIT_ERROR;

	line += PKT_LEN_SIZE;
	/*
	 * TODO: How do we deal with empty lines? Try again? with the next
	 * line?
	 */
	if (len == PKT_LEN_SIZE) {
		/* Empty line: no packet produced */
		*head = NULL;
		*out = line;
		return 0;
	}

	if (len == 0) { /* Flush pkt */
		*out = line;
		return flush_pkt(head);
	}

	len -= PKT_LEN_SIZE; /* the encoded length includes its own size */

	/* Dispatch on the payload's first byte or leading keyword */
	if (*line == GIT_SIDE_BAND_DATA)
		ret = data_pkt(head, line, len);
	else if (*line == GIT_SIDE_BAND_PROGRESS)
		ret = sideband_progress_pkt(head, line, len);
	else if (*line == GIT_SIDE_BAND_ERROR)
		ret = sideband_error_pkt(head, line, len);
	else if (!git__prefixcmp(line, "ACK"))
		ret = ack_pkt(head, line, len);
	else if (!git__prefixcmp(line, "NAK"))
		ret = nak_pkt(head);
	else if (!git__prefixcmp(line, "ERR "))
		ret = err_pkt(head, line, len);
	else if (*line == '#')
		ret = comment_pkt(head, line, len);
	else if (!git__prefixcmp(line, "ok"))
		ret = ok_pkt(head, line, len);
	else if (!git__prefixcmp(line, "ng"))
		ret = ng_pkt(head, line, len);
	else if (!git__prefixcmp(line, "unpack"))
		ret = unpack_pkt(head, line, len);
	else
		ret = ref_pkt(head, line, len);

	*out = line + len;

	return ret;
}
/* Release a parsed packet together with any strings it owns. */
void git_pkt_free(git_pkt *pkt)
{
	switch (pkt->type) {
	case GIT_PKT_REF: {
		git_pkt_ref *r = (git_pkt_ref *) pkt;
		git__free(r->head.name);
		git__free(r->head.symref_target);
		break;
	}
	case GIT_PKT_OK: {
		git_pkt_ok *o = (git_pkt_ok *) pkt;
		git__free(o->ref);
		break;
	}
	case GIT_PKT_NG: {
		git_pkt_ng *n = (git_pkt_ng *) pkt;
		git__free(n->ref);
		git__free(n->msg);
		break;
	}
	default:
		break;
	}

	git__free(pkt);
}
/* Append the flush packet ("0000") to `buf`. */
int git_pkt_buffer_flush(git_buf *buf)
{
	return git_buf_put(buf, pkt_flush_str, sizeof(pkt_flush_str) - 1);
}
/*
 * Emit the first "want" line, which also advertises our capabilities.
 * Fix: the temporary capability string `str` was leaked on both error
 * paths (OOM and over-long packet); it is now freed on every exit.
 */
static int buffer_want_with_caps(const git_remote_head *head, transport_smart_caps *caps, git_buf *buf)
{
	git_buf str = GIT_BUF_INIT;
	char oid[GIT_OID_HEXSZ +1] = {0};
	size_t len;

	/* Prefer multi_ack_detailed */
	if (caps->multi_ack_detailed)
		git_buf_puts(&str, GIT_CAP_MULTI_ACK_DETAILED " ");
	else if (caps->multi_ack)
		git_buf_puts(&str, GIT_CAP_MULTI_ACK " ");

	/* Prefer side-band-64k if the server supports both */
	if (caps->side_band_64k)
		git_buf_printf(&str, "%s ", GIT_CAP_SIDE_BAND_64K);
	else if (caps->side_band)
		git_buf_printf(&str, "%s ", GIT_CAP_SIDE_BAND);

	if (caps->include_tag)
		git_buf_puts(&str, GIT_CAP_INCLUDE_TAG " ");

	if (caps->thin_pack)
		git_buf_puts(&str, GIT_CAP_THIN_PACK " ");

	if (caps->ofs_delta)
		git_buf_puts(&str, GIT_CAP_OFS_DELTA " ");

	if (git_buf_oom(&str)) {
		git_buf_free(&str);  /* fix: don't leak on OOM */
		return -1;
	}

	len = strlen("XXXXwant ") + GIT_OID_HEXSZ + 1 /* NUL */ +
		 git_buf_len(&str) + 1 /* LF */;

	if (len > 0xffff) {
		giterr_set(GITERR_NET,
			"Tried to produce packet with invalid length %" PRIuZ, len);
		git_buf_free(&str);  /* fix: `str` was leaked on this path */
		return -1;
	}

	git_buf_grow_by(buf, len);
	git_oid_fmt(oid, &head->oid);
	git_buf_printf(buf,
		"%04xwant %s %s\n", (unsigned int)len, oid, git_buf_cstr(&str));
	git_buf_free(&str);

	GITERR_CHECK_ALLOC_BUF(buf);

	return 0;
}
/*
 * All "want" packets have the same length and format, so what we do
 * is overwrite the OID each time.
 *
 * Emits one capability-bearing want for the first non-local ref, plain
 * wants for the rest, and a terminating flush packet.
 * Fix: if every ref was local the scan loop ran to i == count and
 * buffer_want_with_caps(refs[i], ...) read refs[count] out of bounds.
 */
int git_pkt_buffer_wants(
	const git_remote_head * const *refs,
	size_t count,
	transport_smart_caps *caps,
	git_buf *buf)
{
	size_t i = 0;
	const git_remote_head *head;

	if (caps->common) {
		/* find the first ref we actually need to request */
		for (; i < count; ++i) {
			head = refs[i];
			if (!head->local)
				break;
		}

		/* only emit the caps-carrying want when such a ref exists */
		if (i < count) {
			if (buffer_want_with_caps(refs[i], caps, buf) < 0)
				return -1;

			i++;
		}
	}

	for (; i < count; ++i) {
		char oid[GIT_OID_HEXSZ];

		head = refs[i];
		if (head->local)
			continue;

		git_oid_fmt(oid, &head->oid);
		git_buf_put(buf, pkt_want_prefix, strlen(pkt_want_prefix));
		git_buf_put(buf, oid, GIT_OID_HEXSZ);
		git_buf_putc(buf, '\n');

		if (git_buf_oom(buf))
			return -1;
	}

	return git_pkt_buffer_flush(buf);
}
/* Append a "have <oid>\n" packet for the given object id. */
int git_pkt_buffer_have(git_oid *oid, git_buf *buf)
{
	char oidhex[GIT_OID_HEXSZ + 1] = {0};

	git_oid_fmt(oidhex, oid);
	return git_buf_printf(buf, "%s%s\n", pkt_have_prefix, oidhex);
}
/* Append the "done" packet that ends want/have negotiation. */
int git_pkt_buffer_done(git_buf *buf)
{
	return git_buf_put(buf, pkt_done_str, sizeof(pkt_done_str) - 1);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_4809_0 |
crossvul-cpp_data_good_192_0 | /*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "avcodec.h"
#include "dct.h"
#include "faanidct.h"
#include "idctdsp.h"
#include "simple_idct.h"
#include "xvididct.h"
/* Fill `st` from `src_scantable` remapped through `permutation`, and
 * record, for each scan position, the highest permuted index seen so far
 * (raster_end), which bounds the coefficients a decoder must touch. */
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st,
                               const uint8_t *src_scantable)
{
    int idx, highest = -1;

    st->scantable = src_scantable;

    for (idx = 0; idx < 64; idx++)
        st->permutated[idx] = permutation[src_scantable[idx]];

    for (idx = 0; idx < 64; idx++) {
        if (st->permutated[idx] > highest)
            highest = st->permutated[idx];
        st->raster_end[idx] = highest;
    }
}
/*
 * Build the 64-entry IDCT coefficient permutation table for the given
 * permutation type.  An x86-specific initializer may take over entirely.
 *
 * NOTE(review): for an unrecognized perm_type only an error is logged and
 * the table is left untouched — callers presumably always pass a valid
 * type; confirm before relying on the output here.
 */
av_cold void ff_init_scantable_permutation(uint8_t *idct_permutation,
                                           enum idct_permutation_type perm_type)
{
    int i;

    if (ARCH_X86)
        /* x86 may install SIMD-friendly layouts and handle this itself */
        if (ff_init_scantable_permutation_x86(idct_permutation,
                                              perm_type))
            return;

    switch (perm_type) {
    case FF_IDCT_PERM_NONE:
        /* identity mapping */
        for (i = 0; i < 64; i++)
            idct_permutation[i] = i;
        break;
    case FF_IDCT_PERM_LIBMPEG2:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = (i & 0x38) | ((i & 6) >> 1) | ((i & 1) << 2);
        break;
    case FF_IDCT_PERM_TRANSPOSE:
        /* swap row and column indices */
        for (i = 0; i < 64; i++)
            idct_permutation[i] = ((i & 7) << 3) | (i >> 3);
        break;
    case FF_IDCT_PERM_PARTTRANS:
        for (i = 0; i < 64; i++)
            idct_permutation[i] = (i & 0x24) | ((i & 3) << 3) | ((i >> 3) & 3);
        break;
    default:
        av_log(NULL, AV_LOG_ERROR,
               "Internal error, IDCT permutation not set\n");
    }
}
/* Store one 8x8 block of 16-bit coefficients as pixels clamped to 0..255. */
void ff_put_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
                             ptrdiff_t line_size)
{
    int row, col;

    for (row = 0; row < 8; row++) {
        for (col = 0; col < 8; col++)
            pixels[col] = av_clip_uint8(block[col]);
        pixels += line_size;
        block  += 8;
    }
}
/* Store a 4-wide block (4 rows) as pixels clamped to 0..255; the source
 * rows are still laid out with a stride of 8 coefficients. */
static void put_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
                                  int line_size)
{
    int row, col;

    for (row = 0; row < 4; row++) {
        for (col = 0; col < 4; col++)
            pixels[col] = av_clip_uint8(block[col]);
        pixels += line_size;
        block  += 8;
    }
}
/* Store a 2-wide block (2 rows) as pixels clamped to 0..255; source rows
 * keep the full stride of 8 coefficients. */
static void put_pixels_clamped2_c(const int16_t *block, uint8_t *av_restrict pixels,
                                  int line_size)
{
    int row, col;

    for (row = 0; row < 2; row++) {
        for (col = 0; col < 2; col++)
            pixels[col] = av_clip_uint8(block[col]);
        pixels += line_size;
        block  += 8;
    }
}
/* Store an 8x8 block of signed coefficients as pixels: clamp each value
 * to [-128, 127], then bias by +128 into the unsigned 0..255 range. */
static void put_signed_pixels_clamped_c(const int16_t *block,
                                        uint8_t *av_restrict pixels,
                                        ptrdiff_t line_size)
{
    int row, col;

    for (row = 0; row < 8; row++) {
        for (col = 0; col < 8; col++) {
            int v = block[col];

            if (v < -128)
                pixels[col] = 0;
            else if (v > 127)
                pixels[col] = 255;
            else
                pixels[col] = (uint8_t) (v + 128);
        }
        block  += 8;
        pixels += line_size;
    }
}
/* Add an 8x8 block of 16-bit residuals onto existing pixels, clamping
 * each sum to 0..255. */
void ff_add_pixels_clamped_c(const int16_t *block, uint8_t *av_restrict pixels,
                             ptrdiff_t line_size)
{
    int row, col;

    for (row = 0; row < 8; row++) {
        for (col = 0; col < 8; col++)
            pixels[col] = av_clip_uint8(pixels[col] + block[col]);
        pixels += line_size;
        block  += 8;
    }
}
/* Add a 4-wide residual block (4 rows) onto pixels with clamping; the
 * source rows keep the full stride of 8 coefficients. */
static void add_pixels_clamped4_c(const int16_t *block, uint8_t *av_restrict pixels,
                                  int line_size)
{
    int row, col;

    for (row = 0; row < 4; row++) {
        for (col = 0; col < 4; col++)
            pixels[col] = av_clip_uint8(pixels[col] + block[col]);
        pixels += line_size;
        block  += 8;
    }
}
/* Add a 2-wide residual block (2 rows) onto pixels with clamping; the
 * source rows keep the full stride of 8 coefficients. */
static void add_pixels_clamped2_c(const int16_t *block, uint8_t *av_restrict pixels,
                                  int line_size)
{
    int row, col;

    for (row = 0; row < 2; row++) {
        for (col = 0; col < 2; col++)
            pixels[col] = av_clip_uint8(pixels[col] + block[col]);
        pixels += line_size;
        block  += 8;
    }
}
/* 4x4 IDCT (lowres 1): transform in place, then store clamped pixels. */
static void ff_jref_idct4_put(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
{
    ff_j_rev_dct4 (block);
    put_pixels_clamped4_c(block, dest, line_size);
}
/* 4x4 IDCT (lowres 1): transform in place, then add clamped residuals. */
static void ff_jref_idct4_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
{
    ff_j_rev_dct4 (block);
    add_pixels_clamped4_c(block, dest, line_size);
}
/* 2x2 IDCT (lowres 2): transform in place, then store clamped pixels. */
static void ff_jref_idct2_put(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
{
    ff_j_rev_dct2 (block);
    put_pixels_clamped2_c(block, dest, line_size);
}
/* 2x2 IDCT (lowres 2): transform in place, then add clamped residuals. */
static void ff_jref_idct2_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
{
    ff_j_rev_dct2 (block);
    add_pixels_clamped2_c(block, dest, line_size);
}
/* DC-only IDCT (lowres 3): one pixel per block; line_size is unused. */
static void ff_jref_idct1_put(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
{
    dest[0] = av_clip_uint8((block[0] + 4)>>3);
}
/* DC-only IDCT add (lowres 3): one pixel per block; line_size is unused. */
static void ff_jref_idct1_add(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
{
    dest[0] = av_clip_uint8(dest[0] + ((block[0] + 4)>>3));
}
/*
 * Select and install the IDCT implementations for this codec context.
 *
 * Selection order: lowres 1/2/3 force the reduced-size jref IDCTs;
 * 9/10-bit content uses higher-precision simple IDCTs (MPEG-4 Simple
 * Studio Profile needs 32-bit intermediates and only uses idct_put);
 * 12-bit uses the 12-bit simple IDCT; otherwise the requested idct_algo
 * is honoured with the accurate simple 8-bit IDCT as the default.
 * Arch-specific initializers may then override any pointer, after which
 * the scantable permutation is derived from the final perm_type.
 */
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
{
    const unsigned high_bit_depth = avctx->bits_per_raw_sample > 8;

    if (avctx->lowres==1) {
        c->idct_put = ff_jref_idct4_put;
        c->idct_add = ff_jref_idct4_add;
        c->idct = ff_j_rev_dct4;
        c->perm_type = FF_IDCT_PERM_NONE;
    } else if (avctx->lowres==2) {
        c->idct_put = ff_jref_idct2_put;
        c->idct_add = ff_jref_idct2_add;
        c->idct = ff_j_rev_dct2;
        c->perm_type = FF_IDCT_PERM_NONE;
    } else if (avctx->lowres==3) {
        c->idct_put = ff_jref_idct1_put;
        c->idct_add = ff_jref_idct1_add;
        c->idct = ff_j_rev_dct1;
        c->perm_type = FF_IDCT_PERM_NONE;
    } else {
        if (avctx->bits_per_raw_sample == 10 || avctx->bits_per_raw_sample == 9) {
            /* 10-bit MPEG-4 Simple Studio Profile requires a higher precision IDCT
               However, it only uses idct_put */
            if (c->mpeg4_studio_profile)
                c->idct_put = ff_simple_idct_put_int32_10bit;
            else {
                c->idct_put = ff_simple_idct_put_int16_10bit;
                c->idct_add = ff_simple_idct_add_int16_10bit;
                c->idct = ff_simple_idct_int16_10bit;
            }
            c->perm_type = FF_IDCT_PERM_NONE;
        } else if (avctx->bits_per_raw_sample == 12) {
            c->idct_put = ff_simple_idct_put_int16_12bit;
            c->idct_add = ff_simple_idct_add_int16_12bit;
            c->idct = ff_simple_idct_int16_12bit;
            c->perm_type = FF_IDCT_PERM_NONE;
        } else {
            if (avctx->idct_algo == FF_IDCT_INT) {
                c->idct_put = ff_jref_idct_put;
                c->idct_add = ff_jref_idct_add;
                c->idct = ff_j_rev_dct;
                c->perm_type = FF_IDCT_PERM_LIBMPEG2;
#if CONFIG_FAANIDCT
            } else if (avctx->idct_algo == FF_IDCT_FAAN) {
                c->idct_put = ff_faanidct_put;
                c->idct_add = ff_faanidct_add;
                c->idct = ff_faanidct;
                c->perm_type = FF_IDCT_PERM_NONE;
#endif /* CONFIG_FAANIDCT */
            } else { // accurate/default
                /* Be sure FF_IDCT_NONE will select this one, since it uses FF_IDCT_PERM_NONE */
                c->idct_put = ff_simple_idct_put_int16_8bit;
                c->idct_add = ff_simple_idct_add_int16_8bit;
                c->idct = ff_simple_idct_int16_8bit;
                c->perm_type = FF_IDCT_PERM_NONE;
            }
        }
    }

    c->put_pixels_clamped = ff_put_pixels_clamped_c;
    c->put_signed_pixels_clamped = put_signed_pixels_clamped_c;
    c->add_pixels_clamped = ff_add_pixels_clamped_c;

    if (CONFIG_MPEG4_DECODER && avctx->idct_algo == FF_IDCT_XVID)
        ff_xvid_idct_init(c, avctx);

    /* Arch-specific overrides, applied in order. */
    if (ARCH_AARCH64)
        ff_idctdsp_init_aarch64(c, avctx, high_bit_depth);
    if (ARCH_ALPHA)
        ff_idctdsp_init_alpha(c, avctx, high_bit_depth);
    if (ARCH_ARM)
        ff_idctdsp_init_arm(c, avctx, high_bit_depth);
    if (ARCH_PPC)
        ff_idctdsp_init_ppc(c, avctx, high_bit_depth);
    if (ARCH_X86)
        ff_idctdsp_init_x86(c, avctx, high_bit_depth);
    if (ARCH_MIPS)
        ff_idctdsp_init_mips(c, avctx, high_bit_depth);

    ff_init_scantable_permutation(c->idct_permutation,
                                  c->perm_type);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_192_0 |
crossvul-cpp_data_bad_4058_1 | /*
* rfbserver.c - deal with server-side of the RFB protocol.
*/
/*
* Copyright (C) 2011-2012 D. R. Commander
* Copyright (C) 2005 Rohit Kumar, Johannes E. Schindelin
* Copyright (C) 2002 RealVNC Ltd.
* OSXvnc Copyright (C) 2001 Dan McGuirk <mcguirk@incompleteness.net>.
* Original Xvnc code Copyright (C) 1999 AT&T Laboratories Cambridge.
* All Rights Reserved.
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#ifdef __STRICT_ANSI__
#define _BSD_SOURCE
#define _POSIX_SOURCE
#define _XOPEN_SOURCE 600
#endif
#include <stdio.h>
#include <string.h>
#include <rfb/rfb.h>
#include <rfb/rfbregion.h>
#include "private.h"
#include "rfb/rfbconfig.h"
#ifdef LIBVNCSERVER_HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef WIN32
#include <io.h>
#else
#include <pwd.h>
#endif
#include "sockets.h"
#ifdef DEBUGPROTO
#undef DEBUGPROTO
#define DEBUGPROTO(x) x
#else
#define DEBUGPROTO(x)
#endif
#include <stdarg.h>
#include "scale.h"
/* stst() */
#include <sys/types.h>
#include <sys/stat.h>
#if LIBVNCSERVER_HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifndef WIN32
/* readdir() */
#include <dirent.h>
#endif
/* errno */
#include <errno.h>
/* strftime() */
#include <time.h>
/* INT_MAX */
#include <limits.h>
#ifdef LIBVNCSERVER_WITH_WEBSOCKETS
#include "rfbssl.h"
#endif
#ifdef _MSC_VER
/* Prevent POSIX deprecation warnings */
#define close _close
#define strdup _strdup
#endif
#ifdef WIN32
#include <direct.h>
#ifdef __MINGW32__
#define mkdir(path, perms) mkdir(path) /* Omit the perms argument to match POSIX signature */
#else /* MSVC and other windows compilers */
#define mkdir(path, perms) _mkdir(path) /* Omit the perms argument to match POSIX signature */
#endif /* __MINGW32__ else... */
#ifndef S_ISDIR
#define S_ISDIR(m) (((m) & S_IFDIR) == S_IFDIR)
#endif
#endif
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
/*
* Map of quality levels to provide compatibility with TightVNC/TigerVNC
* clients. This emulates the behavior of the TigerVNC Server.
*/
/* JPEG quality percentage for each Tight quality level 0-9. */
static const int tight2turbo_qual[10] = {
    15, 29, 41, 42, 62, 77, 79, 86, 92, 100
};

/* Matching chroma subsampling setting for each Tight quality level. */
static const int tight2turbo_subsamp[10] = {
    1, 1, 1, 2, 2, 2, 0, 0, 0, 0
};
#endif
static void rfbProcessClientProtocolVersion(rfbClientPtr cl);
static void rfbProcessClientNormalMessage(rfbClientPtr cl);
static void rfbProcessClientInitMessage(rfbClientPtr cl);
#if defined(LIBVNCSERVER_HAVE_LIBPTHREAD) || defined(LIBVNCSERVER_HAVE_WIN32THREADS)
/* Take a reference on `cl`, keeping it alive while a thread uses it. */
void rfbIncrClientRef(rfbClientPtr cl)
{
  LOCK(cl->refCountMutex);
  cl->refCount++;
  UNLOCK(cl->refCountMutex);
}
/* Drop a reference on `cl`; once the count reaches zero, signal the
 * condition a deleting thread may be waiting on. */
void rfbDecrClientRef(rfbClientPtr cl)
{
  LOCK(cl->refCountMutex);
  cl->refCount--;
  if(cl->refCount<=0) /* just to be sure also < 0 */
    TSIGNAL(cl->deleteCond);
  UNLOCK(cl->refCountMutex);
}
#else
/* Single-threaded builds: client reference counting is a no-op. */
void rfbIncrClientRef(rfbClientPtr cl) {}
void rfbDecrClientRef(rfbClientPtr cl) {}
#endif
#if defined(LIBVNCSERVER_HAVE_LIBPTHREAD) || defined(LIBVNCSERVER_HAVE_WIN32THREADS)
static MUTEX(rfbClientListMutex);
#endif
/* Cursor for walking a screen's client list.  `next` is the client the
 * iterator currently points at (reference-held in threaded builds);
 * `closedToo` controls whether already-closed clients are yielded. */
struct rfbClientIterator {
  rfbClientPtr next;
  rfbScreenInfoPtr screen;
  rfbBool closedToo;
};
/*
 * Initialize the screen's (empty) client list and the global list mutex.
 * Aborts the process if rfbBool is not one byte, since the protocol code
 * relies on rfbBool being plain char everywhere.
 */
void
rfbClientListInit(rfbScreenInfoPtr rfbScreen)
{
  if(sizeof(rfbBool)!=1) {
    /* a sanity check */
    fprintf(stderr,"rfbBool's size is not 1 (%d)!\n",(int)sizeof(rfbBool));
    /* we cannot continue, because rfbBool is supposed to be char everywhere */
    exit(1);
  }
  rfbScreen->clientHead = NULL;
  INIT_MUTEX(rfbClientListMutex);
}
/* Allocate an iterator over the screen's clients; closed clients are
 * skipped during iteration.  Returns NULL on allocation failure. */
rfbClientIteratorPtr
rfbGetClientIterator(rfbScreenInfoPtr rfbScreen)
{
  rfbClientIteratorPtr it =
    (rfbClientIteratorPtr)malloc(sizeof(struct rfbClientIterator));

  if (it != NULL) {
    it->next = NULL;
    it->screen = rfbScreen;
    it->closedToo = FALSE;
  }
  return it;
}
/* Allocate an iterator over the screen's clients that also yields
 * clients whose socket has already been closed. */
rfbClientIteratorPtr
rfbGetClientIteratorWithClosed(rfbScreenInfoPtr rfbScreen)
{
  rfbClientIteratorPtr it =
    (rfbClientIteratorPtr)malloc(sizeof(struct rfbClientIterator));

  if (it != NULL) {
    it->next = NULL;
    it->screen = rfbScreen;
    it->closedToo = TRUE;
  }
  return it;
}
/*
 * Reset the iterator to the head of the client list and return that head.
 * In threaded builds, if the iterator already held a client reference, it
 * is swapped for a reference on the current head.
 *
 * NOTE(review): the head's reference is taken before rfbClientListMutex is
 * acquired — presumably safe under the project's threading model; verify.
 */
rfbClientPtr
rfbClientIteratorHead(rfbClientIteratorPtr i)
{
#if defined(LIBVNCSERVER_HAVE_LIBPTHREAD) || defined(LIBVNCSERVER_HAVE_WIN32THREADS)
  if(i->next != 0) {
    rfbDecrClientRef(i->next);
    rfbIncrClientRef(i->screen->clientHead);
  }
#endif
  LOCK(rfbClientListMutex);
  i->next = i->screen->clientHead;
  UNLOCK(rfbClientListMutex);
  return i->next;
}
/*
 * Advance the iterator and return the next client (NULL at the end).
 * A fresh iterator (next == 0) starts at the list head.  Unless created
 * "withClosed", clients whose socket is already closed (sock < 0) are
 * skipped.  Threaded builds move the iterator's reference from the
 * previous client to the returned one.
 */
rfbClientPtr
rfbClientIteratorNext(rfbClientIteratorPtr i)
{
  if(i->next == 0) {
    LOCK(rfbClientListMutex);
    i->next = i->screen->clientHead;
    UNLOCK(rfbClientListMutex);
  } else {
    rfbClientPtr cl = i->next;
    i->next = i->next->next;
    rfbDecrClientRef(cl);
  }

#if defined(LIBVNCSERVER_HAVE_LIBPTHREAD) || defined(LIBVNCSERVER_HAVE_WIN32THREADS)
    /* skip clients whose socket is closed, unless asked for them too */
    if(!i->closedToo)
      while(i->next && i->next->sock<0)
        i->next = i->next->next;
    if(i->next)
      rfbIncrClientRef(i->next);
#endif

    return i->next;
}
/* Dispose of an iterator, dropping the reference it may still hold on
 * its current client (threaded builds). */
void
rfbReleaseClientIterator(rfbClientIteratorPtr iterator)
{
  if (iterator->next != NULL)
    rfbDecrClientRef(iterator->next);
  free(iterator);
}
/*
 * rfbNewClientConnection is called from sockets.c when a new connection
 * comes in.  It is a thin wrapper around rfbNewClient.
 */

void
rfbNewClientConnection(rfbScreenInfoPtr rfbScreen,
                       rfbSocket sock)
{
  rfbNewClient(rfbScreen,sock);
}
/*
 * rfbReverseConnection is called to make an outward connection to a
 * "listening" RFB client; the resulting socket is handed to rfbNewClient.
 * Returns the new client record, or NULL if connecting or client setup
 * failed.
 */

rfbClientPtr
rfbReverseConnection(rfbScreenInfoPtr rfbScreen,
                     char *host,
                     int port)
{
    rfbSocket sock;
    rfbClientPtr cl;

    /*
     * Fix: compare against RFB_INVALID_SOCKET instead of `< 0`.  On WIN32
     * rfbSocket is an unsigned SOCKET, so a `< 0` test can never be true
     * and a failed connect would be treated as a valid socket.  (Matches
     * the RFB_INVALID_SOCKET usage elsewhere in this file.)
     */
    if ((sock = rfbConnect(rfbScreen, host, port)) == RFB_INVALID_SOCKET)
        return (rfbClientPtr)NULL;

    cl = rfbNewClient(rfbScreen, sock);

    if (cl) {
        cl->reverseConnection = TRUE;
    }

    return cl;
}
/* Set the RFB protocol version this server reports in its greeting.
 * Only versions 3.3 through 3.8 are accepted; anything else is logged
 * and ignored. */
void
rfbSetProtocolVersion(rfbScreenInfoPtr rfbScreen, int major_, int minor_)
{
    /* Permit the server to set the version to report */
    /* TODO: sanity checking */
    rfbBool valid = (major_==3) && (minor_ > 2 && minor_ < 9);

    if (valid) {
        rfbScreen->protocolMajorVersion = major_;
        rfbScreen->protocolMinorVersion = minor_;
    } else {
        rfbLog("rfbSetProtocolVersion(%d,%d) set to invalid values\n", major_, minor_);
    }
}
/*
 * rfbNewClient is called when a new connection has been made by whatever
 * means.
 *
 * This worker allocates and registers a new rfbClientRec for `sock`:
 * it initializes all per-client protocol state, links the client into
 * the screen's list, runs protocol-extension and newClient hooks, and
 * (for TCP) sends the RFB protocol version greeting.  Returns NULL on
 * failure or when the newClientHook refuses the client.
 */

static rfbClientPtr
rfbNewTCPOrUDPClient(rfbScreenInfoPtr rfbScreen,
                     rfbSocket sock,
                     rfbBool isUDP)
{
    rfbProtocolVersionMsg pv;
    rfbClientIteratorPtr iterator;
    rfbClientPtr cl,cl_;
#ifdef LIBVNCSERVER_IPv6
    struct sockaddr_storage addr;
#else
    struct sockaddr_in addr;
#endif
    socklen_t addrlen = sizeof(addr);
    rfbProtocolExtension* extension;

    cl = (rfbClientPtr)calloc(sizeof(rfbClientRec),1);
    if (!cl)
        return NULL;

    cl->screen = rfbScreen;
    cl->sock = sock;
    cl->viewOnly = FALSE;
    /* setup pseudo scaling */
    cl->scaledScreen = rfbScreen;
    cl->scaledScreen->scaledScreenRefCount++;

    rfbResetStats(cl);

    cl->clientData = NULL;
    cl->clientGoneHook = rfbDoNothingWithClient;

    if(isUDP) {
      rfbLog(" accepted UDP client\n");
    } else {
#ifdef LIBVNCSERVER_IPv6
      char host[1024];
#endif
      int one=1;
      size_t otherClientsCount = 0;

      /* Record the peer's address as a printable host string. */
      getpeername(sock, (struct sockaddr *)&addr, &addrlen);
#ifdef LIBVNCSERVER_IPv6
      if(getnameinfo((struct sockaddr*)&addr, addrlen, host, sizeof(host), NULL, 0, NI_NUMERICHOST) != 0) {
        rfbLogPerror("rfbNewClient: error in getnameinfo");
        cl->host = strdup("");
      }
      else
        cl->host = strdup(host);
#else
      cl->host = strdup(inet_ntoa(addr.sin_addr));
#endif

      /* Count and log how many other clients are currently connected. */
      iterator = rfbGetClientIterator(rfbScreen);
      while ((cl_ = rfbClientIteratorNext(iterator)) != NULL)
        ++otherClientsCount;
      rfbReleaseClientIterator(iterator);
      rfbLog(" %lu other clients\n", (unsigned long) otherClientsCount);

      if(!rfbSetNonBlocking(sock)) {
        rfbCloseSocket(sock);
        return NULL;
      }

      if (setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
                     (char *)&one, sizeof(one)) < 0) {
        rfbLogPerror("setsockopt failed: can't set TCP_NODELAY flag, non TCP socket?");
      }

      FD_SET(sock,&(rfbScreen->allFds));
      rfbScreen->maxFd = rfbMax(sock,rfbScreen->maxFd);

      INIT_MUTEX(cl->outputMutex);
      INIT_MUTEX(cl->refCountMutex);
      INIT_MUTEX(cl->sendMutex);
      INIT_COND(cl->deleteCond);

      cl->state = RFB_PROTOCOL_VERSION;

      cl->reverseConnection = FALSE;
      cl->readyForSetColourMapEntries = FALSE;
      cl->useCopyRect = FALSE;
      cl->preferredEncoding = -1;
      cl->correMaxWidth = 48;
      cl->correMaxHeight = 48;
#ifdef LIBVNCSERVER_HAVE_LIBZ
      cl->zrleData = NULL;
#endif

      cl->copyRegion = sraRgnCreate();
      cl->copyDX = 0;
      cl->copyDY = 0;

      /* Mark the whole framebuffer modified so the first update is full. */
      cl->modifiedRegion =
        sraRgnCreateRect(0,0,rfbScreen->width,rfbScreen->height);

      INIT_MUTEX(cl->updateMutex);
      INIT_COND(cl->updateCond);

      cl->requestedRegion = sraRgnCreate();

      cl->format = cl->screen->serverFormat;
      cl->translateFn = rfbTranslateNone;
      cl->translateLookupTable = NULL;

      LOCK(rfbClientListMutex);

#if defined(LIBVNCSERVER_HAVE_LIBPTHREAD) || defined(LIBVNCSERVER_HAVE_WIN32THREADS)
      cl->refCount = 0;
#endif
      /* Link the new client at the head of the screen's client list. */
      cl->next = rfbScreen->clientHead;
      cl->prev = NULL;
      if (rfbScreen->clientHead)
        rfbScreen->clientHead->prev = cl;

      rfbScreen->clientHead = cl;
      UNLOCK(rfbClientListMutex);

#if defined(LIBVNCSERVER_HAVE_LIBZ) || defined(LIBVNCSERVER_HAVE_LIBPNG)
      cl->tightQualityLevel = -1;
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
      cl->tightCompressLevel = TIGHT_DEFAULT_COMPRESSION;
      cl->turboSubsampLevel = TURBO_DEFAULT_SUBSAMP;
      {
        int i;
        for (i = 0; i < 4; i++)
          cl->zsActive[i] = FALSE;
      }
#endif
#endif

      cl->fileTransfer.fd = -1;

      cl->enableCursorShapeUpdates = FALSE;
      cl->enableCursorPosUpdates = FALSE;
      cl->useRichCursorEncoding = FALSE;
      cl->enableLastRectEncoding = FALSE;
      cl->enableKeyboardLedState = FALSE;
      cl->enableSupportedMessages = FALSE;
      cl->enableSupportedEncodings = FALSE;
      cl->enableServerIdentity = FALSE;
      cl->lastKeyboardLedState = -1;
      cl->cursorX = rfbScreen->cursorX;
      cl->cursorY = rfbScreen->cursorY;
      cl->useNewFBSize = FALSE;
      cl->useExtDesktopSize = FALSE;
      cl->requestedDesktopSizeChange = 0;
      cl->lastDesktopSizeChangeError = 0;

#ifdef LIBVNCSERVER_HAVE_LIBZ
      cl->compStreamInited = FALSE;
      cl->compStream.total_in = 0;
      cl->compStream.total_out = 0;
      cl->compStream.zalloc = Z_NULL;
      cl->compStream.zfree = Z_NULL;
      cl->compStream.opaque = Z_NULL;

      cl->zlibCompressLevel = 5;
#endif

      cl->progressiveSliceY = 0;

      cl->extensions = NULL;

      cl->lastPtrX = -1;

#ifdef LIBVNCSERVER_HAVE_LIBPTHREAD
      cl->pipe_notify_client_thread[0] = -1;
      cl->pipe_notify_client_thread[1] = -1;
#endif

#ifdef LIBVNCSERVER_WITH_WEBSOCKETS
      /*
       * Wait a few ms for the client to send WebSockets connection (TLS/SSL or plain)
       */
      if (!webSocketsCheck(cl)) {
        /* Error reporting handled in webSocketsHandshake */
        rfbCloseClient(cl);
        rfbClientConnectionGone(cl);
        return NULL;
      }
#endif

      /* Send the RFB protocol version greeting to the client. */
      sprintf(pv,rfbProtocolVersionFormat,rfbScreen->protocolMajorVersion,
              rfbScreen->protocolMinorVersion);

      if (rfbWriteExact(cl, pv, sz_rfbProtocolVersionMsg) < 0) {
        rfbLogPerror("rfbNewClient: write");
        rfbCloseClient(cl);
        rfbClientConnectionGone(cl);
        return NULL;
      }
    }

    /* Offer the new client to every registered protocol extension. */
    for(extension = rfbGetExtensionIterator(); extension;
        extension=extension->next) {
      void* data = NULL;
      /* if the extension does not have a newClient method, it wants
       * to be initialized later. */
      if(extension->newClient && extension->newClient(cl, &data))
        rfbEnableExtension(cl, extension, data);
    }
    rfbReleaseExtensionIterator();

    switch (cl->screen->newClientHook(cl)) {
    case RFB_CLIENT_ON_HOLD:
      cl->onHold = TRUE;
      break;
    case RFB_CLIENT_ACCEPT:
      cl->onHold = FALSE;
      break;
    case RFB_CLIENT_REFUSE:
      rfbCloseClient(cl);
      rfbClientConnectionGone(cl);
      cl = NULL;
      break;
    }
    return cl;
}
/*
 * rfbNewClient: accept a new TCP client on the given socket and perform the
 * initial protocol handshake.  Thin wrapper around rfbNewTCPOrUDPClient()
 * with isUDP=FALSE.
 */
rfbClientPtr
rfbNewClient(rfbScreenInfoPtr rfbScreen,
             rfbSocket sock)
{
    return rfbNewTCPOrUDPClient(rfbScreen, sock, FALSE);
}
/*
 * rfbNewUDPClient: (re)create the screen's single UDP client from the
 * screen's UDP socket.  The new client is cached in rfbScreen->udpClient
 * and also returned to the caller.
 */
rfbClientPtr
rfbNewUDPClient(rfbScreenInfoPtr rfbScreen)
{
    rfbClientPtr udpClient = rfbNewTCPOrUDPClient(rfbScreen, rfbScreen->udpSock, TRUE);
    rfbScreen->udpClient = udpClient;
    return udpClient;
}
/*
 * rfbClientConnectionGone is called from sockets.c just after a connection
 * has gone away.  It unlinks the client from the screen's client list,
 * waits until no other thread still holds a reference to the client, then
 * releases every per-client resource and finally frees the client struct.
 */
void
rfbClientConnectionGone(rfbClientPtr cl)
{
#if defined(LIBVNCSERVER_HAVE_LIBZ) && defined(LIBVNCSERVER_HAVE_LIBJPEG)
    int i;
#endif

    /* Unlink the client from the doubly-linked per-screen client list. */
    LOCK(rfbClientListMutex);
    if (cl->prev)
        cl->prev->next = cl->next;
    else
        cl->screen->clientHead = cl->next;
    if (cl->next)
        cl->next->prev = cl->prev;
    UNLOCK(rfbClientListMutex);

#if defined(LIBVNCSERVER_HAVE_LIBPTHREAD) || defined(LIBVNCSERVER_HAVE_WIN32THREADS)
    /* Block until every other thread has dropped its reference; deleteCond
     * is signalled whenever refCount is decremented elsewhere. */
    if(cl->screen->backgroundLoop != FALSE) {
        int i;
        do {
            LOCK(cl->refCountMutex);
            i=cl->refCount;
            if(i>0)
                WAIT(cl->deleteCond,cl->refCountMutex);
            UNLOCK(cl->refCountMutex);
        } while(i>0);
    }
#endif

    if(cl->sock != RFB_INVALID_SOCKET)
        rfbCloseSocket(cl->sock);

    /* Drop our reference on a possibly shared scaled-screen framebuffer. */
    if (cl->scaledScreen!=NULL)
        cl->scaledScreen->scaledScreenRefCount--;

#ifdef LIBVNCSERVER_HAVE_LIBZ
    rfbFreeZrleData(cl);
#endif

    rfbFreeUltraData(cl);

    /* free buffers holding pixel data before and after encoding */
    free(cl->beforeEncBuf);
    free(cl->afterEncBuf);

    /* The socket must also be removed from the screen's select() set. */
    if(cl->sock != RFB_INVALID_SOCKET)
        FD_CLR(cl->sock,&(cl->screen->allFds));

    /* Notify the application before the client data disappears. */
    cl->clientGoneHook(cl);

    rfbLog("Client %s gone\n",cl->host);
    free(cl->host);

#ifdef LIBVNCSERVER_HAVE_LIBZ
    /* Release the compression state structures if any. */
    if ( cl->compStreamInited ) {
        deflateEnd( &(cl->compStream) );
    }

#ifdef LIBVNCSERVER_HAVE_LIBJPEG
    /* One zlib stream per Tight compression channel. */
    for (i = 0; i < 4; i++) {
        if (cl->zsActive[i])
            deflateEnd(&cl->zsStruct[i]);
    }
#endif
#endif

    if (cl->screen->pointerClient == cl)
        cl->screen->pointerClient = NULL;

    sraRgnDestroy(cl->modifiedRegion);
    sraRgnDestroy(cl->requestedRegion);
    sraRgnDestroy(cl->copyRegion);

    if (cl->translateLookupTable) free(cl->translateLookupTable);

    TINI_COND(cl->updateCond);
    TINI_MUTEX(cl->updateMutex);

    /* make sure outputMutex is unlocked before destroying */
    LOCK(cl->outputMutex);
    UNLOCK(cl->outputMutex);
    TINI_MUTEX(cl->outputMutex);

    /* same for sendMutex */
    LOCK(cl->sendMutex);
    UNLOCK(cl->sendMutex);
    TINI_MUTEX(cl->sendMutex);

#ifdef LIBVNCSERVER_HAVE_LIBPTHREAD
    close(cl->pipe_notify_client_thread[0]);
    close(cl->pipe_notify_client_thread[1]);
#endif

    rfbPrintStats(cl);
    rfbResetStats(cl);

    free(cl);
}
/*
 * rfbProcessClientMessage is called when there is data to read from a client.
 * It dispatches to the handler matching the client's current protocol state.
 */
void
rfbProcessClientMessage(rfbClientPtr cl)
{
    int state = cl->state;

    if (state == RFB_PROTOCOL_VERSION) {
        rfbProcessClientProtocolVersion(cl);
    } else if (state == RFB_SECURITY_TYPE) {
        rfbProcessClientSecurityType(cl);
    } else if (state == RFB_AUTHENTICATION) {
        rfbAuthProcessClientMessage(cl);
    } else if (state == RFB_INITIALISATION ||
               state == RFB_INITIALISATION_SHARED) {
        rfbProcessClientInitMessage(cl);
    } else {
        /* RFB_NORMAL and anything else: regular protocol traffic. */
        rfbProcessClientNormalMessage(cl);
    }
}
/*
 * rfbProcessClientProtocolVersion is called when the client sends its
 * protocol version message.  The message is parsed, the major version is
 * checked against ours, and on success authentication is started.
 */
static void
rfbProcessClientProtocolVersion(rfbClientPtr cl)
{
    rfbProtocolVersionMsg pv;
    int bytes, clientMajor, clientMinor;

    bytes = rfbReadExact(cl, pv, sz_rfbProtocolVersionMsg);
    if (bytes <= 0) {
        if (bytes == 0)
            rfbLog("rfbProcessClientProtocolVersion: client gone\n");
        else
            rfbLogPerror("rfbProcessClientProtocolVersion: read");
        rfbCloseClient(cl);
        return;
    }
    pv[sz_rfbProtocolVersionMsg] = 0;

    if (sscanf(pv, rfbProtocolVersionFormat, &clientMajor, &clientMinor) != 2) {
        rfbErr("rfbProcessClientProtocolVersion: not a valid RFB client: %s\n", pv);
        rfbCloseClient(cl);
        return;
    }
    rfbLog("Client Protocol Version %d.%d\n", clientMajor, clientMinor);

    if (clientMajor != rfbProtocolMajorVersion) {
        rfbErr("RFB protocol version mismatch - server %d.%d, client %d.%d",
               cl->screen->protocolMajorVersion, cl->screen->protocolMinorVersion,
               clientMajor, clientMinor);
        rfbCloseClient(cl);
        return;
    }

    /* Check for the minor version use either of the two standard version of RFB */
    /*
     * UltraVNC Viewer detects FileTransfer compatible servers via rfb versions
     * 3.4, 3.6, 3.14, 3.16
     * It's a bad method, but it is what they use to enable features...
     * maintaining RFB version compatibility across multiple servers is a pain
     * Should use something like ServerIdentity encoding
     */
    cl->protocolMajorVersion = clientMajor;
    cl->protocolMinorVersion = clientMinor;

    rfbLog("Protocol version sent %d.%d, using %d.%d\n",
           clientMajor, clientMinor, rfbProtocolMajorVersion, cl->protocolMinorVersion);

    rfbAuthNewClient(cl);
}
/*
 * Send a length-prefixed reason string to the client and close the
 * connection.  Used e.g. to report a handshake failure in human-readable
 * form before dropping the client.
 */
void
rfbClientSendString(rfbClientPtr cl, const char *reason)
{
    int reasonLen = strlen(reason);
    char *msg;

    rfbLog("rfbClientSendString(\"%s\")\n", reason);

    /* Wire format: 4-byte big-endian length followed by the raw bytes. */
    msg = (char *)malloc(4 + reasonLen);
    if (msg != NULL) {
        ((uint32_t *)msg)[0] = Swap32IfLE(reasonLen);
        memcpy(&msg[4], reason, reasonLen);
        if (rfbWriteExact(cl, msg, 4 + reasonLen) < 0)
            rfbLogPerror("rfbClientSendString: write");
        free(msg);
    }

    rfbCloseClient(cl);
}
/*
 * rfbClientConnFailed is called when a client connection has failed either
 * because it talks the wrong protocol or it has failed authentication.
 */
void
rfbClientConnFailed(rfbClientPtr cl,
                    const char *reason)
{
    int reasonLen = strlen(reason);
    char *msg;

    rfbLog("rfbClientConnFailed(\"%s\")\n", reason);

    /* Wire format: connFailed result word, reason length, reason bytes. */
    msg = (char *)malloc(8 + reasonLen);
    if (msg != NULL) {
        ((uint32_t *)msg)[0] = Swap32IfLE(rfbConnFailed);
        ((uint32_t *)msg)[1] = Swap32IfLE(reasonLen);
        memcpy(&msg[8], reason, reasonLen);
        if (rfbWriteExact(cl, msg, 8 + reasonLen) < 0)
            rfbLogPerror("rfbClientConnFailed: write");
        free(msg);
    }

    rfbCloseClient(cl);
}
/*
 * rfbProcessClientInitMessage is called when the client sends its
 * initialisation message.  It reads (or fakes) the ClientInit message,
 * replies with ServerInit (framebuffer geometry, pixel format, desktop
 * name), initialises protocol extensions, and - depending on the shared
 * flag and the screen's sharing policy - either refuses this client or
 * disconnects the other clients.
 */
static void
rfbProcessClientInitMessage(rfbClientPtr cl)
{
    rfbClientInitMsg ci;
    /* ServerInit is built inside u.buf so the desktop name can be appended
     * directly after the fixed-size header. */
    union {
        char buf[256];
        rfbServerInitMsg si;
    } u;
    int len, n;
    rfbClientIteratorPtr iterator;
    rfbClientPtr otherCl;
    rfbExtensionData* extension;

    if (cl->state == RFB_INITIALISATION_SHARED) {
        /* In this case behave as though an implicit ClientInit message has
         * already been received with a shared-flag of true. */
        ci.shared = 1;
        /* Avoid the possibility of exposing the RFB_INITIALISATION_SHARED
         * state to calling software. */
        cl->state = RFB_INITIALISATION;
    } else {
        if ((n = rfbReadExact(cl, (char *)&ci,sz_rfbClientInitMsg)) <= 0) {
            if (n == 0)
                rfbLog("rfbProcessClientInitMessage: client gone\n");
            else
                rfbLogPerror("rfbProcessClientInitMessage: read");
            rfbCloseClient(cl);
            return;
        }
    }

    memset(u.buf,0,sizeof(u.buf));

    /* Multi-byte ServerInit fields go out big-endian. */
    u.si.framebufferWidth = Swap16IfLE(cl->screen->width);
    u.si.framebufferHeight = Swap16IfLE(cl->screen->height);
    u.si.format = cl->screen->serverFormat;
    u.si.format.redMax = Swap16IfLE(u.si.format.redMax);
    u.si.format.greenMax = Swap16IfLE(u.si.format.greenMax);
    u.si.format.blueMax = Swap16IfLE(u.si.format.blueMax);

    /* Desktop name is truncated to 127 bytes (u.buf holds 256 total). */
    strncpy(u.buf + sz_rfbServerInitMsg, cl->screen->desktopName, 127);
    len = strlen(u.buf + sz_rfbServerInitMsg);
    u.si.nameLength = Swap32IfLE(len);

    if (rfbWriteExact(cl, u.buf, sz_rfbServerInitMsg + len) < 0) {
        rfbLogPerror("rfbProcessClientInitMessage: write");
        rfbCloseClient(cl);
        return;
    }

    /* Give each enabled extension a chance to initialise (next pointer is
     * saved first because init() may disable and unlink the extension). */
    for(extension = cl->extensions; extension;) {
        rfbExtensionData* next = extension->next;
        if(extension->extension->init &&
           !extension->extension->init(cl, extension->data))
            /* extension requested that it be removed */
            rfbDisableExtension(cl, extension->extension);
        extension = next;
    }

    cl->state = RFB_NORMAL;

    /* Enforce the sharing policy for non-reverse connections. */
    if (!cl->reverseConnection &&
        (cl->screen->neverShared || (!cl->screen->alwaysShared && !ci.shared))) {

        if (cl->screen->dontDisconnect) {
            /* Policy: keep existing clients, refuse the newcomer. */
            iterator = rfbGetClientIterator(cl->screen);
            while ((otherCl = rfbClientIteratorNext(iterator)) != NULL) {
                if ((otherCl != cl) && (otherCl->state == RFB_NORMAL)) {
                    rfbLog("-dontdisconnect: Not shared & existing client\n");
                    rfbLog(" refusing new client %s\n", cl->host);
                    rfbCloseClient(cl);
                    rfbReleaseClientIterator(iterator);
                    return;
                }
            }
            rfbReleaseClientIterator(iterator);
        } else {
            /* Policy: the newcomer wins, existing clients are dropped. */
            iterator = rfbGetClientIterator(cl->screen);
            while ((otherCl = rfbClientIteratorNext(iterator)) != NULL) {
                if ((otherCl != cl) && (otherCl->state == RFB_NORMAL)) {
                    rfbLog("Not shared - closing connection to client %s\n",
                           otherCl->host);
                    rfbCloseClient(otherCl);
                }
            }
            rfbReleaseClientIterator(iterator);
        }
    }
}
/* The values come in based on the scaled screen, we need to convert them to
 * values based on the main screen's coordinate system, then clip the
 * rectangle against the screen bounds.  Returns FALSE when the rectangle's
 * origin lies outside the screen entirely.
 */
static rfbBool rectSwapIfLEAndClip(uint16_t* x,uint16_t* y,uint16_t* w,uint16_t* h,
        rfbClientPtr cl)
{
    /* Byte-swap the wire (big-endian) values into native ints. */
    int x1=Swap16IfLE(*x);
    int y1=Swap16IfLE(*y);
    int w1=Swap16IfLE(*w);
    int h1=Swap16IfLE(*h);

    rfbScaledCorrection(cl->scaledScreen, cl->screen, &x1, &y1, &w1, &h1, "rectSwapIfLEAndClip");
    *x = x1;
    *y = y1;
    *w = w1;
    *h = h1;

    /* Clamp width.  If *x exceeds the screen width, width-*x is negative;
     * assigning it to the unsigned *w wraps to a huge value, so the clamp
     * condition still holds - the second, identical test catches exactly
     * that wraparound and rejects the rectangle. */
    if(*w>cl->screen->width-*x)
        *w=cl->screen->width-*x;
    /* possible underflow */
    if(*w>cl->screen->width-*x)
        return FALSE;

    /* Same clamp-then-recheck dance for the height. */
    if(*h>cl->screen->height-*y)
        *h=cl->screen->height-*y;
    if(*h>cl->screen->height-*y)
        return FALSE;

    return TRUE;
}
/*
 * Send keyboard LED state (KeyboardLedState pseudo-encoding).  The LED
 * bitmask travels in the x coordinate of an otherwise empty rectangle.
 */
rfbBool
rfbSendKeyboardLedState(rfbClientPtr cl)
{
    rfbFramebufferUpdateRectHeader hdr;

    /* Flush first if the header would overflow the update buffer. */
    if (cl->ublen + sz_rfbFramebufferUpdateRectHeader > UPDATE_BUF_SIZE) {
        if (!rfbSendUpdateBuf(cl))
            return FALSE;
    }

    hdr.encoding = Swap32IfLE(rfbEncodingKeyboardLedState);
    hdr.r.x = Swap16IfLE(cl->lastKeyboardLedState);
    hdr.r.y = 0;
    hdr.r.w = 0;
    hdr.r.h = 0;

    memcpy(&cl->updateBuf[cl->ublen], (char *)&hdr,
           sz_rfbFramebufferUpdateRectHeader);
    cl->ublen += sz_rfbFramebufferUpdateRectHeader;

    rfbStatRecordEncodingSent(cl, rfbEncodingKeyboardLedState,
                              sz_rfbFramebufferUpdateRectHeader,
                              sz_rfbFramebufferUpdateRectHeader);

    return rfbSendUpdateBuf(cl);
}
/* Set bit `position` (0-255) in the little-endian byte array `buffer`.
 * Every macro parameter is fully parenthesized: the original expansion
 * `position % 8` / `position & 255` mis-binds for expression arguments
 * such as rfbSetBit(buf, base + n), because % and & bind tighter/looser
 * than the operators inside the argument. */
#define rfbSetBit(buffer, position) \
        ((buffer)[((position) & 255) / 8] |= (1 << ((position) % 8)))
/*
 * Send rfbEncodingSupportedMessages: bitfields advertising which
 * client->server and server->client message types this server understands,
 * carried in the payload of a pseudo-encoding rectangle.
 */
rfbBool
rfbSendSupportedMessages(rfbClientPtr cl)
{
    rfbFramebufferUpdateRectHeader rect;
    rfbSupportedMessages msgs;

    /* Flush first if header + payload would overflow the update buffer. */
    if (cl->ublen + sz_rfbFramebufferUpdateRectHeader
                  + sz_rfbSupportedMessages > UPDATE_BUF_SIZE) {
        if (!rfbSendUpdateBuf(cl))
            return FALSE;
    }

    /* Payload length travels in r.w; r.x/r.y/r.h are unused here. */
    rect.encoding = Swap32IfLE(rfbEncodingSupportedMessages);
    rect.r.x = 0;
    rect.r.y = 0;
    rect.r.w = Swap16IfLE(sz_rfbSupportedMessages);
    rect.r.h = 0;

    memcpy(&cl->updateBuf[cl->ublen], (char *)&rect,
           sz_rfbFramebufferUpdateRectHeader);
    cl->ublen += sz_rfbFramebufferUpdateRectHeader;

    /* Each message type number sets one bit in the appropriate bitfield. */
    memset((char *)&msgs, 0, sz_rfbSupportedMessages);
    rfbSetBit(msgs.client2server, rfbSetPixelFormat);
    rfbSetBit(msgs.client2server, rfbFixColourMapEntries);
    rfbSetBit(msgs.client2server, rfbSetEncodings);
    rfbSetBit(msgs.client2server, rfbFramebufferUpdateRequest);
    rfbSetBit(msgs.client2server, rfbKeyEvent);
    rfbSetBit(msgs.client2server, rfbPointerEvent);
    rfbSetBit(msgs.client2server, rfbClientCutText);
    rfbSetBit(msgs.client2server, rfbFileTransfer);
    rfbSetBit(msgs.client2server, rfbSetScale);
    /*rfbSetBit(msgs.client2server, rfbSetServerInput); */
    /*rfbSetBit(msgs.client2server, rfbSetSW); */
    /*rfbSetBit(msgs.client2server, rfbTextChat); */
    rfbSetBit(msgs.client2server, rfbPalmVNCSetScaleFactor);

    rfbSetBit(msgs.server2client, rfbFramebufferUpdate);
    rfbSetBit(msgs.server2client, rfbSetColourMapEntries);
    rfbSetBit(msgs.server2client, rfbBell);
    rfbSetBit(msgs.server2client, rfbServerCutText);
    rfbSetBit(msgs.server2client, rfbResizeFrameBuffer);
    rfbSetBit(msgs.server2client, rfbPalmVNCReSizeFrameBuffer);
    rfbSetBit(msgs.client2server, rfbSetDesktopSize);

    /* xvp is only advertised when the application installed a hook. */
    if (cl->screen->xvpHook) {
        rfbSetBit(msgs.client2server, rfbXvp);
        rfbSetBit(msgs.server2client, rfbXvp);
    }

    memcpy(&cl->updateBuf[cl->ublen], (char *)&msgs, sz_rfbSupportedMessages);
    cl->ublen += sz_rfbSupportedMessages;

    rfbStatRecordEncodingSent(cl, rfbEncodingSupportedMessages,
                              sz_rfbFramebufferUpdateRectHeader+sz_rfbSupportedMessages,
                              sz_rfbFramebufferUpdateRectHeader+sz_rfbSupportedMessages);

    if (!rfbSendUpdateBuf(cl))
        return FALSE;

    return TRUE;
}
/*
 * Send rfbEncodingSupportedEncodings: the list of encodings and
 * pseudo-encodings this server can produce, carried in the payload of a
 * pseudo-encoding rectangle (one uint32_t per encoding; count in r.h).
 */
rfbBool
rfbSendSupportedEncodings(rfbClientPtr cl)
{
    rfbFramebufferUpdateRectHeader rect;
    /* Encodings compiled into this server build (conditional on libz,
     * libjpeg and libpng availability). */
    static uint32_t supported[] = {
        rfbEncodingRaw,
        rfbEncodingCopyRect,
        rfbEncodingRRE,
        rfbEncodingCoRRE,
        rfbEncodingHextile,
#ifdef LIBVNCSERVER_HAVE_LIBZ
        rfbEncodingZlib,
        rfbEncodingZRLE,
        rfbEncodingZYWRLE,
#endif
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
        rfbEncodingTight,
#endif
#ifdef LIBVNCSERVER_HAVE_LIBPNG
        rfbEncodingTightPng,
#endif
        rfbEncodingUltra,
        rfbEncodingUltraZip,
        rfbEncodingXCursor,
        rfbEncodingRichCursor,
        rfbEncodingPointerPos,
        rfbEncodingLastRect,
        rfbEncodingNewFBSize,
        rfbEncodingExtDesktopSize,
        rfbEncodingKeyboardLedState,
        rfbEncodingSupportedMessages,
        rfbEncodingSupportedEncodings,
        rfbEncodingServerIdentity,
    };
    uint32_t nEncodings = sizeof(supported) / sizeof(supported[0]), i;

    /* think rfbSetEncodingsMsg */
    /* Flush first if header + payload would overflow the update buffer. */
    if (cl->ublen + sz_rfbFramebufferUpdateRectHeader
                  + (nEncodings * sizeof(uint32_t)) > UPDATE_BUF_SIZE) {
        if (!rfbSendUpdateBuf(cl))
            return FALSE;
    }

    /* r.w carries the payload byte count, r.h the encoding count. */
    rect.encoding = Swap32IfLE(rfbEncodingSupportedEncodings);
    rect.r.x = 0;
    rect.r.y = 0;
    rect.r.w = Swap16IfLE(nEncodings * sizeof(uint32_t));
    rect.r.h = Swap16IfLE(nEncodings);

    memcpy(&cl->updateBuf[cl->ublen], (char *)&rect,
           sz_rfbFramebufferUpdateRectHeader);
    cl->ublen += sz_rfbFramebufferUpdateRectHeader;

    for (i = 0; i < nEncodings; i++) {
        uint32_t encoding = Swap32IfLE(supported[i]);
        memcpy(&cl->updateBuf[cl->ublen], (char *)&encoding, sizeof(encoding));
        cl->ublen += sizeof(encoding);
    }

    rfbStatRecordEncodingSent(cl, rfbEncodingSupportedEncodings,
                              sz_rfbFramebufferUpdateRectHeader+(nEncodings * sizeof(uint32_t)),
                              sz_rfbFramebufferUpdateRectHeader+(nEncodings * sizeof(uint32_t)));

    if (!rfbSendUpdateBuf(cl))
        return FALSE;

    return TRUE;
}
/*
 * rfbSetServerVersionIdentity: set the server's version string (later sent
 * to clients via the ServerIdentity pseudo-encoding), printf-style.  The
 * formatted string is truncated to fit a 256-byte buffer; any previously
 * set string is released.
 */
void
rfbSetServerVersionIdentity(rfbScreenInfoPtr screen, char *fmt, ...)
{
    char buffer[256];
    va_list ap;

    va_start(ap, fmt);
    /* vsnprintf() always NUL-terminates within the given size, so pass the
     * full buffer size (the old sizeof-1 needlessly lost one character). */
    vsnprintf(buffer, sizeof(buffer), fmt, ap);
    va_end(ap);

    free(screen->versionString);   /* free(NULL) is a no-op */
    screen->versionString = strdup(buffer);
}
/*
 * Send rfbEncodingServerIdentity: the application's version string plus
 * the LibVNCServer package version, carried (including the terminating
 * NUL) in the payload of a pseudo-encoding rectangle.
 */
rfbBool
rfbSendServerIdentity(rfbClientPtr cl)
{
    rfbFramebufferUpdateRectHeader rect;
    char buffer[512];
    size_t dataLen;

    /* tack on our library version */
    snprintf(buffer, sizeof(buffer), "%s (%s)",
             (cl->screen->versionString==NULL ? "unknown" : cl->screen->versionString),
             LIBVNCSERVER_PACKAGE_STRING);
    /* Hoisted: the original recomputed strlen(buffer)+1 seven times. */
    dataLen = strlen(buffer) + 1;   /* payload includes the NUL */

    /* Flush first if header + payload would overflow the update buffer. */
    if (cl->ublen + sz_rfbFramebufferUpdateRectHeader + dataLen > UPDATE_BUF_SIZE) {
        if (!rfbSendUpdateBuf(cl))
            return FALSE;
    }

    /* Payload length travels in r.w; r.x/r.y/r.h are unused here. */
    rect.encoding = Swap32IfLE(rfbEncodingServerIdentity);
    rect.r.x = 0;
    rect.r.y = 0;
    rect.r.w = Swap16IfLE(dataLen);
    rect.r.h = 0;

    memcpy(&cl->updateBuf[cl->ublen], (char *)&rect,
           sz_rfbFramebufferUpdateRectHeader);
    cl->ublen += sz_rfbFramebufferUpdateRectHeader;

    memcpy(&cl->updateBuf[cl->ublen], buffer, dataLen);
    cl->ublen += dataLen;

    rfbStatRecordEncodingSent(cl, rfbEncodingServerIdentity,
                              sz_rfbFramebufferUpdateRectHeader+dataLen,
                              sz_rfbFramebufferUpdateRectHeader+dataLen);

    if (!rfbSendUpdateBuf(cl))
        return FALSE;

    return TRUE;
}
/*
 * Send an xvp server message (version + code) to the client.
 */
rfbBool
rfbSendXvp(rfbClientPtr cl, uint8_t version, uint8_t code)
{
    rfbXvpMsg msg;

    msg.type = rfbXvp;
    msg.pad = 0;
    msg.version = version;
    msg.code = code;

    LOCK(cl->sendMutex);
    if (rfbWriteExact(cl, (char *)&msg, sz_rfbXvpMsg) < 0) {
        rfbLogPerror("rfbSendXvp: write");
        rfbCloseClient(cl);
    }
    UNLOCK(cl->sendMutex);

    rfbStatRecordMessageSent(cl, rfbXvp, sz_rfbXvpMsg, sz_rfbXvpMsg);

    /* TRUE is returned even after a failed write; the client has already
     * been closed in that case (matches the original behaviour). */
    return TRUE;
}
/*
 * Send a TextChat message to the client.  `length` is either one of the
 * control values rfbTextChatOpen/Close/Finished (which carry no payload)
 * or the byte length of the chat text in `buffer`, clamped to
 * rfbTextMaxSize.
 */
rfbBool rfbSendTextChatMessage(rfbClientPtr cl, uint32_t length, char *buffer)
{
    rfbTextChatMsg tc;
    int bytesToSend=0;

    memset((char *)&tc, 0, sizeof(tc));
    tc.type = rfbTextChat;
    tc.length = Swap32IfLE(length);

    /* The control codes double as "length" values and have no payload. */
    switch(length) {
    case rfbTextChatOpen:
    case rfbTextChatClose:
    case rfbTextChatFinished:
        bytesToSend=0;
        break;
    default:
        bytesToSend=length;
        if (bytesToSend>rfbTextMaxSize)
            bytesToSend=rfbTextMaxSize;
    }

    /* Flush first if header + payload would overflow the update buffer. */
    if (cl->ublen + sz_rfbTextChatMsg + bytesToSend > UPDATE_BUF_SIZE) {
        if (!rfbSendUpdateBuf(cl))
            return FALSE;
    }

    memcpy(&cl->updateBuf[cl->ublen], (char *)&tc, sz_rfbTextChatMsg);
    cl->ublen += sz_rfbTextChatMsg;
    if (bytesToSend>0) {
        memcpy(&cl->updateBuf[cl->ublen], buffer, bytesToSend);
        cl->ublen += bytesToSend;
    }
    rfbStatRecordMessageSent(cl, rfbTextChat, sz_rfbTextChatMsg+bytesToSend, sz_rfbTextChatMsg+bytesToSend);

    if (!rfbSendUpdateBuf(cl))
        return FALSE;

    return TRUE;
}
/*
 * Drop the client and return `ret` from the enclosing function unless file
 * transfers are permitted (both the permitFileTransfer flag and the
 * optional getFileTransferPermission() hook must allow them).
 * Wrapped in do { ... } while (0) so the macro expands to a single
 * statement and is safe in unbraced if/else bodies.
 */
#define FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN(msg, cl, ret) \
    do { \
        if ((cl->screen->getFileTransferPermission != NULL \
            && cl->screen->getFileTransferPermission(cl) != TRUE) \
            || cl->screen->permitFileTransfer != TRUE) { \
            rfbLog("%sUltra File Transfer is disabled, dropping client: %s\n", msg, cl->host); \
            rfbCloseClient(cl); \
            return ret; \
        } \
    } while (0)
int DB = 1; /* file-transfer debug logging toggle: non-zero enables rfbLog tracing below */
/*
 * Send one Ultra file-transfer protocol message: the fixed
 * rfbFileTransferMsg header followed by `length` bytes of optional
 * payload from `buffer`.  Closes the client and returns FALSE if file
 * transfers are not permitted or if a write fails.
 */
rfbBool rfbSendFileTransferMessage(rfbClientPtr cl, uint8_t contentType, uint8_t contentParam, uint32_t size, uint32_t length, const char *buffer)
{
    rfbFileTransferMsg ft;
    ft.type = rfbFileTransfer;
    ft.contentType = contentType;
    ft.contentParam = contentParam;
    ft.pad = 0; /* UltraVNC did not Swap16LE(ft.contentParam) (Looks like it might be BigEndian) */
    ft.size = Swap32IfLE(size);
    ft.length = Swap32IfLE(length);

    FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE);
    /*
    rfbLog("rfbSendFileTransferMessage( %dtype, %dparam, %dsize, %dlen, %p)\n", contentType, contentParam, size, length, buffer);
    */
    /* Header and payload go out under the same lock so messages from
     * concurrent senders cannot interleave. */
    LOCK(cl->sendMutex);
    if (rfbWriteExact(cl, (char *)&ft, sz_rfbFileTransferMsg) < 0) {
        rfbLogPerror("rfbSendFileTransferMessage: write");
        rfbCloseClient(cl);
        UNLOCK(cl->sendMutex);
        return FALSE;
    }

    if (length>0)
    {
        if (rfbWriteExact(cl, buffer, length) < 0) {
            rfbLogPerror("rfbSendFileTransferMessage: write");
            rfbCloseClient(cl);
            UNLOCK(cl->sendMutex);
            return FALSE;
        }
    }
    UNLOCK(cl->sendMutex);

    rfbStatRecordMessageSent(cl, rfbFileTransfer, sz_rfbFileTransferMsg+length, sz_rfbFileTransferMsg+length);

    return TRUE;
}
/*
 * UltraVNC uses Windows Structures
 * The types below mirror the Win32 FILETIME / WIN32_FIND_DATA layouts so
 * that directory listings can be sent to Windows viewers verbatim.
 */
#define MAX_PATH 260

/* Mirror of Win32 FILETIME: a 64-bit timestamp split into two 32-bit words. */
typedef struct {
    uint32_t dwLowDateTime;
    uint32_t dwHighDateTime;
} RFB_FILETIME;

/* Mirror of Win32 WIN32_FIND_DATA: one directory entry as sent on the wire. */
typedef struct {
    uint32_t dwFileAttributes;
    RFB_FILETIME ftCreationTime;
    RFB_FILETIME ftLastAccessTime;
    RFB_FILETIME ftLastWriteTime;
    uint32_t nFileSizeHigh;
    uint32_t nFileSizeLow;
    uint32_t dwReserved0;
    uint32_t dwReserved1;
    uint8_t cFileName[ MAX_PATH ];
    uint8_t cAlternateFileName[ 14 ];
} RFB_FIND_DATA;

/* Win32 dwFileAttributes bits used in the listings above. */
#define RFB_FILE_ATTRIBUTE_READONLY 0x1
#define RFB_FILE_ATTRIBUTE_HIDDEN 0x2
#define RFB_FILE_ATTRIBUTE_SYSTEM 0x4
#define RFB_FILE_ATTRIBUTE_DIRECTORY 0x10
#define RFB_FILE_ATTRIBUTE_ARCHIVE 0x20
#define RFB_FILE_ATTRIBUTE_NORMAL 0x80
#define RFB_FILE_ATTRIBUTE_TEMPORARY 0x100
#define RFB_FILE_ATTRIBUTE_COMPRESSED 0x800
/*
 * Translate a DOS-style path received from an UltraVNC viewer into a UNIX
 * path: a leading "C:" maps to the filesystem root, any other path is made
 * relative to $HOME (or used as-is when $HOME is unset), and backslashes
 * become forward slashes.  Returns FALSE when the result would not fit in
 * unixPathMaxLen bytes, or drops the client when transfers are disabled.
 */
rfbBool rfbFilenameTranslate2UNIX(rfbClientPtr cl, /* in */ char *path, /* out */ char *unixPath, size_t unixPathMaxLen)
{
    size_t x, len;
    char *home=NULL;

    FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE);

    /*
     * Do not use strncpy() - truncating the file name would probably have undesirable side effects
     * Instead check if destination buffer is big enough
     */
    if (strlen(path) >= unixPathMaxLen)
        return FALSE;

    /* C: */
    if (path[0]=='C' && path[1]==':')
        strcpy(unixPath, &path[2]);
    else
    {
        home = getenv("HOME");
        if (home!=NULL)
        {
            /* Re-check buffer size */
            if ((strlen(path) + strlen(home) + 1) >= unixPathMaxLen)
                return FALSE;
            strcpy(unixPath, home);
            strcat(unixPath,"/");
            strcat(unixPath, path);
        }
        else
            strcpy(unixPath, path);
    }

    /* Hoisted strlen() out of the loop condition (it was re-evaluated on
     * every iteration); size_t index matches strlen's return type. */
    len = strlen(unixPath);
    for (x = 0; x < len; x++)
        if (unixPath[x]=='\\') unixPath[x]='/';
    return TRUE;
}
/*
 * Translate a UNIX path to DOS style: prefix "C:" and turn slashes into
 * backslashes (the "C:" prefix itself is skipped by starting at index 2).
 * NOTE(review): `path` carries no size parameter - callers pass MAX_PATH
 * buffers - so the sprintf() below can overflow for overlong unixPath
 * values; fixing that properly needs a size argument in the interface.
 */
rfbBool rfbFilenameTranslate2DOS(rfbClientPtr cl, char *unixPath, char *path)
{
    size_t x, len;
    FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE);
    sprintf(path,"C:%s", unixPath);
    /* Hoisted strlen() out of the loop condition. */
    len = strlen(path);
    for (x = 2; x < len; x++)
        if (path[x]=='/') path[x]='\\';
    return TRUE;
}
/*
 * Send the content of a directory to the client (Ultra file-transfer
 * rfbDirContentRequest / rfbRDirContent).  Each entry is serialized as a
 * Windows WIN32_FIND_DATA-like RFB_FIND_DATA record; the listing is
 * terminated with an empty rfbDirPacket.
 */
rfbBool rfbSendDirContent(rfbClientPtr cl, int length, char *buffer)
{
    char retfilename[MAX_PATH];
    char path[MAX_PATH];
    struct stat statbuf;
    RFB_FIND_DATA win32filename;
    int nOptLen = 0, retval=0;
#ifdef WIN32
    WIN32_FIND_DATAA winFindData;
    HANDLE findHandle;
    int pathLen, basePathLength;
    char *basePath;
#else
    DIR *dirp=NULL;
    struct dirent *direntp=NULL;
#endif

    FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE);

    /* Client thinks we are Winblows */
    if (!rfbFilenameTranslate2UNIX(cl, buffer, path, sizeof(path)))
        return FALSE;

    if (DB) rfbLog("rfbProcessFileTransfer() rfbDirContentRequest: rfbRDirContent: \"%s\"->\"%s\"\n",buffer, path);

#ifdef WIN32
    /* Create a search string, like C:\folder\* */
    pathLen = strlen(path);
    basePath = malloc(pathLen + 3);
    if (basePath == NULL)   /* bugfix: malloc() result was not checked */
        return FALSE;
    memcpy(basePath, path, pathLen);
    basePathLength = pathLen;
    basePath[basePathLength] = '\\';
    basePath[basePathLength + 1] = '*';
    basePath[basePathLength + 2] = '\0';

    /* Start a search.
     * bugfix: FindFirstFileA() must be given the wildcard search string
     * (basePath, ending in "\*"); the original passed the bare directory
     * path - which yields the directory itself, not its entries - and
     * freed basePath unused. */
    memset(&winFindData, 0, sizeof(winFindData));
    findHandle = FindFirstFileA(basePath, &winFindData);
    free(basePath);

    if (findHandle == INVALID_HANDLE_VALUE)
#else
    dirp=opendir(path);
    if (dirp==NULL)
#endif
        return rfbSendFileTransferMessage(cl, rfbDirPacket, rfbADirectory, 0, 0, NULL);

    /* send back the path name (necessary for links) */
    if (rfbSendFileTransferMessage(cl, rfbDirPacket, rfbADirectory, 0, length, buffer)==FALSE) {
        /* bugfix: the directory handle leaked on this early return */
#ifdef WIN32
        FindClose(findHandle);
#else
        closedir(dirp);
#endif
        return FALSE;
    }

#ifdef WIN32
    while (findHandle != INVALID_HANDLE_VALUE)
#else
    for (direntp=readdir(dirp); direntp!=NULL; direntp=readdir(dirp))
#endif
    {
        /* get stats */
#ifdef WIN32
        snprintf(retfilename,sizeof(retfilename),"%s/%s", path, winFindData.cFileName);
#else
        snprintf(retfilename,sizeof(retfilename),"%s/%s", path, direntp->d_name);
#endif
        retval = stat(retfilename, &statbuf);

        if (retval==0)
        {
            memset((char *)&win32filename, 0, sizeof(win32filename));
#ifdef WIN32
            win32filename.dwFileAttributes = winFindData.dwFileAttributes;
            win32filename.ftCreationTime.dwLowDateTime = winFindData.ftCreationTime.dwLowDateTime;
            win32filename.ftCreationTime.dwHighDateTime = winFindData.ftCreationTime.dwHighDateTime;
            win32filename.ftLastAccessTime.dwLowDateTime = winFindData.ftLastAccessTime.dwLowDateTime;
            win32filename.ftLastAccessTime.dwHighDateTime = winFindData.ftLastAccessTime.dwHighDateTime;
            win32filename.ftLastWriteTime.dwLowDateTime = winFindData.ftLastWriteTime.dwLowDateTime;
            win32filename.ftLastWriteTime.dwHighDateTime = winFindData.ftLastWriteTime.dwHighDateTime;
            win32filename.nFileSizeLow = winFindData.nFileSizeLow;
            win32filename.nFileSizeHigh = winFindData.nFileSizeHigh;
            win32filename.dwReserved0 = winFindData.dwReserved0;
            win32filename.dwReserved1 = winFindData.dwReserved1;
            strcpy((char *)win32filename.cFileName, winFindData.cFileName);
            strcpy((char *)win32filename.cAlternateFileName, winFindData.cAlternateFileName);
#else
            /* Fake Win32 attributes and Intel-order timestamps from stat(). */
            win32filename.dwFileAttributes = Swap32IfBE(RFB_FILE_ATTRIBUTE_NORMAL);
            if (S_ISDIR(statbuf.st_mode))
                win32filename.dwFileAttributes = Swap32IfBE(RFB_FILE_ATTRIBUTE_DIRECTORY);
            win32filename.ftCreationTime.dwLowDateTime = Swap32IfBE(statbuf.st_ctime); /* Intel Order */
            win32filename.ftCreationTime.dwHighDateTime = 0;
            win32filename.ftLastAccessTime.dwLowDateTime = Swap32IfBE(statbuf.st_atime); /* Intel Order */
            win32filename.ftLastAccessTime.dwHighDateTime = 0;
            win32filename.ftLastWriteTime.dwLowDateTime = Swap32IfBE(statbuf.st_mtime); /* Intel Order */
            win32filename.ftLastWriteTime.dwHighDateTime = 0;
            win32filename.nFileSizeLow = Swap32IfBE(statbuf.st_size); /* Intel Order */
            win32filename.nFileSizeHigh = 0;
            win32filename.dwReserved0 = 0;
            win32filename.dwReserved1 = 0;

            /* If this had the full path, we would need to translate to DOS format ("C:\") */
            /* rfbFilenameTranslate2DOS(cl, retfilename, win32filename.cFileName); */
            strcpy((char *)win32filename.cFileName, direntp->d_name);
#endif

            /* Do not show hidden files (but show how to move up the tree) */
            if ((strcmp((char *)win32filename.cFileName, "..")==0) || (win32filename.cFileName[0]!='.'))
            {
                /* Only the used portion of cFileName is sent; the unused
                 * cAlternateFileName is dropped entirely. */
                nOptLen = sizeof(RFB_FIND_DATA) - MAX_PATH - 14 + strlen((char *)win32filename.cFileName);
                /*
                rfbLog("rfbProcessFileTransfer() rfbDirContentRequest: rfbRDirContent: Sending \"%s\"\n", (char *)win32filename.cFileName);
                */
                if (rfbSendFileTransferMessage(cl, rfbDirPacket, rfbADirectory, 0, nOptLen, (char *)&win32filename)==FALSE)
                {
#ifdef WIN32
                    FindClose(findHandle);
#else
                    closedir(dirp);
#endif
                    return FALSE;
                }
            }
        }

#ifdef WIN32
        if (FindNextFileA(findHandle, &winFindData) == 0)
        {
            FindClose(findHandle);
            findHandle = INVALID_HANDLE_VALUE;
        }
#endif
    }
#ifdef WIN32
    if (findHandle != INVALID_HANDLE_VALUE)
    {
        FindClose(findHandle);
    }
#else
    closedir(dirp);
#endif

    /* End of the transfer */
    return rfbSendFileTransferMessage(cl, rfbDirPacket, 0, 0, 0, NULL);
}
/*
 * Read `length` bytes of file-transfer payload from the client into a
 * freshly malloc'ed, NUL-terminated buffer.  Returns NULL (after closing
 * the client) on oversized requests or read errors, and NULL without
 * error when length is 0.  The caller owns the returned buffer and must
 * free() it.
 */
char *rfbProcessFileTransferReadBuffer(rfbClientPtr cl, uint32_t length)
{
    char *buffer=NULL;
    int n=0;

    FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, NULL);
    /*
    We later alloc length+1, which might wrap around on 32-bit systems if length equals
    0XFFFFFFFF, i.e. SIZE_MAX for 32-bit systems. On 64-bit systems, a length of 0XFFFFFFFF
    will safely be allocated since this check will never trigger and malloc() can digest length+1
    without problems as length is a uint32_t.
    We also later pass length to rfbReadExact() that expects a signed int type and
    that might wrap on platforms with a 32-bit int type if length is bigger
    than 0X7FFFFFFF.
    */
    if(length == SIZE_MAX || length > INT_MAX) {
        rfbErr("rfbProcessFileTransferReadBuffer: too big file transfer length requested: %u", (unsigned int)length);
        rfbCloseClient(cl);
        return NULL;
    }

    if (length>0) {
        buffer=malloc((size_t)length+1);
        if (buffer!=NULL) {
            if ((n = rfbReadExact(cl, (char *)buffer, length)) <= 0) {
                if (n != 0)
                    rfbLogPerror("rfbProcessFileTransferReadBuffer: read");
                rfbCloseClient(cl);
                /* NOTE: don't forget to free(buffer) if you return early! */
                if (buffer!=NULL) free(buffer);
                return NULL;
            }
            /* Null Terminate */
            buffer[length]=0;
        }
    }
    return buffer;
}
/*
 * Send the next chunk of an in-progress outgoing file transfer, but only
 * when the socket can accept data without blocking.  Called from many code
 * paths to service transfers, so it never closes the client itself; when
 * transfers are not permitted or none is active it simply reports success.
 */
rfbBool rfbSendFileTransferChunk(rfbClientPtr cl)
{
    /* Allocate buffer for compression */
    char readBuf[sz_rfbBlockSize];
    int bytesRead=0;
    int retval=0;
    fd_set wfds;
    struct timeval tv;
    int n;
#ifdef LIBVNCSERVER_HAVE_LIBZ
    unsigned char compBuf[sz_rfbBlockSize + 1024];
    unsigned long nMaxCompSize = sizeof(compBuf);
    int nRetC = 0;
#endif

    /*
     * Don't close the client if we get into this one because
     * it is called from many places to service file transfers.
     * Note that permitFileTransfer is checked first.
     */
    if (cl->screen->permitFileTransfer != TRUE ||
        (cl->screen->getFileTransferPermission != NULL
         && cl->screen->getFileTransferPermission(cl) != TRUE)) {
        return TRUE;
    }

    /* If not sending, or no file open... Return as if we sent something! */
    if ((cl->fileTransfer.fd!=-1) && (cl->fileTransfer.sending==1))
    {
        FD_ZERO(&wfds);
        FD_SET(cl->sock, &wfds);

        /* return immediately */
        tv.tv_sec = 0;
        tv.tv_usec = 0;
        /* Non-blocking writability probe on the client socket. */
        n = select(cl->sock + 1, NULL, &wfds, NULL, &tv);
        if (n<0) {
#ifdef WIN32
            errno=WSAGetLastError();
#endif
            rfbLog("rfbSendFileTransferChunk() select failed: %s\n", strerror(errno));
        }
        /* We have space on the transmit queue */
        if (n > 0)
        {
            bytesRead = read(cl->fileTransfer.fd, readBuf, sz_rfbBlockSize);
            switch (bytesRead) {
            case 0:
                /*
                rfbLog("rfbSendFileTransferChunk(): End-Of-File Encountered\n");
                */
                /* EOF: tell the client the transfer completed and reset. */
                retval = rfbSendFileTransferMessage(cl, rfbEndOfFile, 0, 0, 0, NULL);
                close(cl->fileTransfer.fd);
                cl->fileTransfer.fd = -1;
                cl->fileTransfer.sending = 0;
                cl->fileTransfer.receiving = 0;
                return retval;
            case -1:
                /* TODO : send an error msg to the client... */
#ifdef WIN32
                errno=WSAGetLastError();
#endif
                rfbLog("rfbSendFileTransferChunk(): %s\n",strerror(errno));
                /* Read error: abort the transfer and reset state. */
                retval = rfbSendFileTransferMessage(cl, rfbAbortFileTransfer, 0, 0, 0, NULL);
                close(cl->fileTransfer.fd);
                cl->fileTransfer.fd = -1;
                cl->fileTransfer.sending = 0;
                cl->fileTransfer.receiving = 0;
                return retval;
            default:
                /*
                rfbLog("rfbSendFileTransferChunk(): Read %d bytes\n", bytesRead);
                */
                if (!cl->fileTransfer.compressionEnabled)
                    return rfbSendFileTransferMessage(cl, rfbFilePacket, 0, 0, bytesRead, readBuf);
                else
                {
#ifdef LIBVNCSERVER_HAVE_LIBZ
                    nRetC = compress(compBuf, &nMaxCompSize, (unsigned char *)readBuf, bytesRead);
                    /*
                    rfbLog("Compressed the packet from %d -> %d bytes\n", nMaxCompSize, bytesRead);
                    */
                    /* Send compressed only when zlib succeeded AND it is
                     * actually smaller; contentParam 1 flags compression. */
                    if ((nRetC==0) && (nMaxCompSize<bytesRead))
                        return rfbSendFileTransferMessage(cl, rfbFilePacket, 0, 1, nMaxCompSize, (char *)compBuf);
                    else
                        return rfbSendFileTransferMessage(cl, rfbFilePacket, 0, 0, bytesRead, readBuf);
#else
                    /* We do not support compression of the data stream */
                    return rfbSendFileTransferMessage(cl, rfbFilePacket, 0, 0, bytesRead, readBuf);
#endif
                }
            }
        }
    }
    return TRUE;
}
/*
 * Handle one incoming rfbFileTransfer message (UltraVNC file-transfer
 * extension).  contentType selects the sub-operation (drive/dir listing,
 * download request, upload offer, data packet, delete/rename commands, ...);
 * contentParam, size and length are sub-operation specific and have already
 * been byte-swapped by the caller.
 *
 * Returns TRUE to keep processing the connection, FALSE on a fatal error
 * (the caller is expected to have the connection closed in that case).
 */
rfbBool rfbProcessFileTransfer(rfbClientPtr cl, uint8_t contentType, uint8_t contentParam, uint32_t size, uint32_t length)
{
    char *buffer=NULL, *p=NULL;
    char *newBuffer=NULL;      /* temporary for safe realloc() */
    int retval=0;
    char filename1[MAX_PATH];
    char filename2[MAX_PATH];
    char szFileTime[MAX_PATH];
    struct stat statbuf;
    uint32_t sizeHtmp=0;
    int n=0;
    char timespec[64];
#ifdef LIBVNCSERVER_HAVE_LIBZ
    unsigned char compBuff[sz_rfbBlockSize];
    unsigned long nRawBytes = sz_rfbBlockSize;
    int nRet = 0;
#endif
    FILEXFER_ALLOWED_OR_CLOSE_AND_RETURN("", cl, FALSE);
    /*
    rfbLog("rfbProcessFileTransfer(%dtype, %dparam, %dsize, %dlen)\n", contentType, contentParam, size, length);
    */

    switch (contentType) {
    case rfbDirContentRequest:
        switch (contentParam) {
        case rfbRDrivesList: /* Client requests the List of Local Drives */
            /*
            rfbLog("rfbProcessFileTransfer() rfbDirContentRequest: rfbRDrivesList:\n");
            */
            /* Format when filled : "C:\<NULL>D:\<NULL>....Z:\<NULL><NULL>
             *
             * We replace the "\" char following the drive letter and ":"
             * with a char corresponding to the type of drive
             * We obtain something like "C:l<NULL>D:c<NULL>....Z:n\<NULL><NULL>"
             * Isn't it ugly ?
             * DRIVE_FIXED = 'l'     (local?)
             * DRIVE_REMOVABLE = 'f' (floppy?)
             * DRIVE_CDROM = 'c'
             * DRIVE_REMOTE = 'n'
             */
            /* in unix, there are no 'drives'  (We could list mount points though)
             * We fake the root as a "C:" for the Winblows users
             */
            filename2[0]='C';
            filename2[1]=':';
            filename2[2]='l';
            filename2[3]=0;
            filename2[4]=0;
            retval = rfbSendFileTransferMessage(cl, rfbDirPacket, rfbADrivesList, 0, 5, filename2);
            if (buffer!=NULL) free(buffer);
            return retval;
            break;
        case rfbRDirContent: /* Client requests the content of a directory */
            /*
            rfbLog("rfbProcessFileTransfer() rfbDirContentRequest: rfbRDirContent\n");
            */
            if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE;
            retval = rfbSendDirContent(cl, length, buffer);
            if (buffer!=NULL) free(buffer);
            return retval;
        }
        break;

    case rfbDirPacket:
        rfbLog("rfbProcessFileTransfer() rfbDirPacket\n");
        break;
    case rfbFileAcceptHeader:
        rfbLog("rfbProcessFileTransfer() rfbFileAcceptHeader\n");
        break;
    case rfbCommandReturn:
        rfbLog("rfbProcessFileTransfer() rfbCommandReturn\n");
        break;
    case rfbFileChecksums:
        /* Destination file already exists - the viewer sends the checksums */
        rfbLog("rfbProcessFileTransfer() rfbFileChecksums\n");
        break;
    case rfbFileTransferAccess:
        rfbLog("rfbProcessFileTransfer() rfbFileTransferAccess\n");
        break;

    /*
     * sending from the server to the viewer
     */
    case rfbFileTransferRequest:
        /*
        rfbLog("rfbProcessFileTransfer() rfbFileTransferRequest:\n");
        */
        /* add some space to the end of the buffer as we will be adding a timespec to it */
        if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE;
        /* The client requests a File */
        if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1)))
            goto fail;
        cl->fileTransfer.fd=open(filename1, O_RDONLY, 0744);
        /*
        */
        if (DB) rfbLog("rfbProcessFileTransfer() rfbFileTransferRequest(\"%s\"->\"%s\") Open: %s fd=%d\n", buffer, filename1, (cl->fileTransfer.fd==-1?"Failed":"Success"), cl->fileTransfer.fd);

        if (cl->fileTransfer.fd!=-1) {
            if (fstat(cl->fileTransfer.fd, &statbuf)!=0) {
                close(cl->fileTransfer.fd);
                cl->fileTransfer.fd=-1;
            }
            else
            {
                /* Add the File Time Stamp to the filename */
                strftime(timespec, sizeof(timespec), "%m/%d/%Y %H:%M",gmtime(&statbuf.st_ctime));
                /* Grow via a temporary pointer: assigning realloc()'s result
                 * straight back to 'buffer' would leak the old block (and the
                 * open fd) if the allocation fails. */
                newBuffer=realloc(buffer, length + strlen(timespec) + 2); /* comma, and Null term */
                if (newBuffer==NULL) {
                    rfbLog("rfbProcessFileTransfer() rfbFileTransferRequest: Failed to malloc %d bytes\n", length + strlen(timespec) + 2);
                    free(buffer);
                    close(cl->fileTransfer.fd);
                    cl->fileTransfer.fd = -1;
                    return FALSE;
                }
                buffer = newBuffer;
                strcat(buffer,",");
                strcat(buffer, timespec);
                length = strlen(buffer);
                if (DB) rfbLog("rfbProcessFileTransfer() buffer is now: \"%s\"\n", buffer);
            }
        }

        /* The viewer supports compression if size==1 */
        cl->fileTransfer.compressionEnabled = (size==1);

        /*
        rfbLog("rfbProcessFileTransfer() rfbFileTransferRequest(\"%s\"->\"%s\")%s\n", buffer, filename1, (size==1?" <Compression Enabled>":""));
        */

        /* File Size in bytes, 0xFFFFFFFF (-1) means error */
        retval = rfbSendFileTransferMessage(cl, rfbFileHeader, 0, (cl->fileTransfer.fd==-1 ? -1 : statbuf.st_size), length, buffer);

        if (cl->fileTransfer.fd==-1)
        {
            if (buffer!=NULL) free(buffer);
            return retval;
        }
        /* setup filetransfer stuff */
        cl->fileTransfer.fileSize = statbuf.st_size;
        cl->fileTransfer.numPackets = statbuf.st_size / sz_rfbBlockSize;
        cl->fileTransfer.receiving = 0;
        cl->fileTransfer.sending = 0; /* set when we receive a rfbFileHeader: */

        /* TODO: finish 64-bit file size support */
        sizeHtmp = 0;
        LOCK(cl->sendMutex);
        if (rfbWriteExact(cl, (char *)&sizeHtmp, 4) < 0) {
            rfbLogPerror("rfbProcessFileTransfer: write");
            rfbCloseClient(cl);
            UNLOCK(cl->sendMutex);
            if (buffer!=NULL) free(buffer);
            return FALSE;
        }
        UNLOCK(cl->sendMutex);
        break;

    case rfbFileHeader:
        /* Destination file (viewer side) is ready for reception (size > 0) or not (size = -1) */
        if (size==-1) {
            rfbLog("rfbProcessFileTransfer() rfbFileHeader (error, aborting)\n");
            /* guard against close(-1): the fd may never have been opened */
            if (cl->fileTransfer.fd!=-1)
                close(cl->fileTransfer.fd);
            cl->fileTransfer.fd=-1;
            return TRUE;
        }

        /*
        rfbLog("rfbProcessFileTransfer() rfbFileHeader (%d bytes of a file)\n", size);
        */

        /* Starts the transfer! */
        cl->fileTransfer.sending=1;
        return rfbSendFileTransferChunk(cl);
        break;

    /*
     * sending from the viewer to the server
     */
    case rfbFileTransferOffer:
        /* client is sending a file to us */
        /* buffer contains full path name (plus FileTime) */
        /* size contains size of the file */
        /*
        rfbLog("rfbProcessFileTransfer() rfbFileTransferOffer:\n");
        */
        if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE;

        /* Parse the FileTime */
        p = strrchr(buffer, ',');
        if (p!=NULL) {
            *p = '\0';
            strncpy(szFileTime, p+1, sizeof(szFileTime));
            szFileTime[sizeof(szFileTime)-1] = '\x00'; /* ensure NULL terminating byte is present, even if copy overflowed */
        } else
            szFileTime[0]=0;

        /* Need to read in sizeHtmp */
        if ((n = rfbReadExact(cl, (char *)&sizeHtmp, 4)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessFileTransfer: read sizeHtmp");
            rfbCloseClient(cl);
            /* NOTE: don't forget to free(buffer) if you return early! */
            if (buffer!=NULL) free(buffer);
            return FALSE;
        }
        sizeHtmp = Swap32IfLE(sizeHtmp);

        if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1)))
            goto fail;

        /* If the file exists... We can send a rfbFileChecksums back to the client before we send an rfbFileAcceptHeader */
        /* TODO: Delta Transfer */

        cl->fileTransfer.fd=open(filename1, O_CREAT|O_WRONLY|O_TRUNC, 0744);
        if (DB) rfbLog("rfbProcessFileTransfer() rfbFileTransferOffer(\"%s\"->\"%s\") %s %s fd=%d\n", buffer, filename1, (cl->fileTransfer.fd==-1?"Failed":"Success"), (cl->fileTransfer.fd==-1?strerror(errno):""), cl->fileTransfer.fd);
        /*
        */

        /* File Size in bytes, 0xFFFFFFFF (-1) means error */
        retval = rfbSendFileTransferMessage(cl, rfbFileAcceptHeader, 0, (cl->fileTransfer.fd==-1 ? -1 : 0), length, buffer);
        if (cl->fileTransfer.fd==-1) {
            free(buffer);
            return retval;
        }

        /* setup filetransfer stuff */
        cl->fileTransfer.fileSize = size;
        cl->fileTransfer.numPackets = size / sz_rfbBlockSize;
        cl->fileTransfer.receiving = 1;
        cl->fileTransfer.sending = 0;
        break;

    case rfbFilePacket:
        /*
        rfbLog("rfbProcessFileTransfer() rfbFilePacket:\n");
        */
        if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE;
        if (cl->fileTransfer.fd!=-1) {
            /* buffer contains the contents of the file */
            if (size==0)
                retval=write(cl->fileTransfer.fd, buffer, length);
            else
            {
#ifdef LIBVNCSERVER_HAVE_LIBZ
                /* compressed packet */
                nRet = uncompress(compBuff,&nRawBytes,(const unsigned char*)buffer, length);
                if(nRet == Z_OK)
                  retval=write(cl->fileTransfer.fd, (char*)compBuff, nRawBytes);
                else
                  retval = -1;
#else
                /* Write the file out as received... */
                retval=write(cl->fileTransfer.fd, buffer, length);
#endif
            }
            if (retval==-1)
            {
                close(cl->fileTransfer.fd);
                cl->fileTransfer.fd=-1;
                cl->fileTransfer.sending   = 0;
                cl->fileTransfer.receiving = 0;
            }
        }
        break;

    case rfbEndOfFile:
        if (DB) rfbLog("rfbProcessFileTransfer() rfbEndOfFile\n");
        /*
        */
        if (cl->fileTransfer.fd!=-1)
            close(cl->fileTransfer.fd);
        cl->fileTransfer.fd=-1;
        cl->fileTransfer.sending   = 0;
        cl->fileTransfer.receiving = 0;
        break;

    case rfbAbortFileTransfer:
        if (DB) rfbLog("rfbProcessFileTransfer() rfbAbortFileTransfer\n");
        /*
        */
        if (cl->fileTransfer.fd!=-1)
        {
            close(cl->fileTransfer.fd);
            cl->fileTransfer.fd=-1;
            cl->fileTransfer.sending   = 0;
            cl->fileTransfer.receiving = 0;
        }
        else
        {
            /* We use this message for FileTransfer rights (<=RC18 versions)
             * The client asks for FileTransfer permission
             */
            if (contentParam == 0)
            {
                rfbLog("rfbProcessFileTransfer() File Transfer Permission DENIED! (Client Version <=RC18)\n");
                /* Old method for FileTransfer handshake perimssion (<=RC18) (Deny it)*/
                return rfbSendFileTransferMessage(cl, rfbAbortFileTransfer, 0, -1, 0, "");
            }
            /* New method is allowed */
            if (cl->screen->getFileTransferPermission!=NULL)
            {
                if (cl->screen->getFileTransferPermission(cl)==TRUE)
                {
                    rfbLog("rfbProcessFileTransfer() File Transfer Permission Granted!\n");
                    return rfbSendFileTransferMessage(cl, rfbFileTransferAccess, 0, 1 , 0, ""); /* Permit */
                }
                else
                {
                    rfbLog("rfbProcessFileTransfer() File Transfer Permission DENIED!\n");
                    return rfbSendFileTransferMessage(cl, rfbFileTransferAccess, 0, -1 , 0, ""); /* Deny */
                }
            }
            else
            {
                if (cl->screen->permitFileTransfer)
                {
                    rfbLog("rfbProcessFileTransfer() File Transfer Permission Granted!\n");
                    return rfbSendFileTransferMessage(cl, rfbFileTransferAccess, 0, 1 , 0, ""); /* Permit */
                }
                else
                {
                    rfbLog("rfbProcessFileTransfer() File Transfer Permission DENIED by default!\n");
                    return rfbSendFileTransferMessage(cl, rfbFileTransferAccess, 0, -1 , 0, ""); /* DEFAULT: DENY (for security) */
                }
            }
        }
        break;

    case rfbCommand:
        /*
        rfbLog("rfbProcessFileTransfer() rfbCommand:\n");
        */
        if ((buffer = rfbProcessFileTransferReadBuffer(cl, length))==NULL) return FALSE;
        switch (contentParam) {
        case rfbCDirCreate:  /* Client requests the creation of a directory */
            if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1)))
                goto fail;
            retval = mkdir(filename1, 0755);
            if (DB) rfbLog("rfbProcessFileTransfer() rfbCommand: rfbCDirCreate(\"%s\"->\"%s\") %s\n", buffer, filename1, (retval==-1?"Failed":"Success"));
            /*
            */
            retval = rfbSendFileTransferMessage(cl, rfbCommandReturn, rfbADirCreate, retval, length, buffer);
            if (buffer!=NULL) free(buffer);
            return retval;
        case rfbCFileDelete: /* Client requests the deletion of a file */
            if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1)))
                goto fail;
            if (stat(filename1,&statbuf)==0)
            {
                if (S_ISDIR(statbuf.st_mode))
                    retval = rmdir(filename1);
                else
                    retval = unlink(filename1);
            }
            else retval=-1;
            retval = rfbSendFileTransferMessage(cl, rfbCommandReturn, rfbAFileDelete, retval, length, buffer);
            if (buffer!=NULL) free(buffer);
            return retval;
        case rfbCFileRename: /* Client requests the Renaming of a file/directory */
            p = strrchr(buffer, '*');
            if (p != NULL)
            {
                /* Split into 2 filenames ('*' is a seperator) */
                *p = '\0';
                if (!rfbFilenameTranslate2UNIX(cl, buffer, filename1, sizeof(filename1)))
                    goto fail;
                if (!rfbFilenameTranslate2UNIX(cl, p+1,    filename2, sizeof(filename2)))
                    goto fail;
                retval = rename(filename1,filename2);
                if (DB) rfbLog("rfbProcessFileTransfer() rfbCommand: rfbCFileRename(\"%s\"->\"%s\" -->> \"%s\"->\"%s\") %s\n", buffer, filename1, p+1, filename2, (retval==-1?"Failed":"Success"));
                /*
                */
                /* Restore the buffer so the reply is good */
                *p = '*';
                retval = rfbSendFileTransferMessage(cl, rfbCommandReturn, rfbAFileRename, retval, length, buffer);
                if (buffer!=NULL) free(buffer);
                return retval;
            }
            break;
        }
        break;
    }

    /* NOTE: don't forget to free(buffer) if you return early! */
    if (buffer!=NULL) free(buffer);
    return TRUE;

fail:
    if (buffer!=NULL) free(buffer);
    return FALSE;
}
/*
* rfbProcessClientNormalMessage is called when the client has sent a normal
* protocol message.
*/
/*
 * Read one client-to-server message header byte, then dispatch on the
 * message type: read the remainder of that message and act on it.
 * Closes the client on short reads, malformed lengths or unknown types
 * (after giving registered protocol extensions a chance to handle them).
 */
static void
rfbProcessClientNormalMessage(rfbClientPtr cl)
{
    int n=0;
    rfbClientToServerMsg msg;
    char *str;
    int i;
    uint32_t enc=0;
    uint32_t lastPreferredEncoding = -1;
    char encBuf[64];
    char encBuf2[64];
    rfbExtDesktopScreen *extDesktopScreens;
    rfbClientIteratorPtr iterator;
    rfbClientPtr clp;

    if ((n = rfbReadExact(cl, (char *)&msg, 1)) <= 0) {
        if (n != 0)
            rfbLogPerror("rfbProcessClientNormalMessage: read");
        rfbCloseClient(cl);
        return;
    }

    switch (msg.type) {

    case rfbSetPixelFormat:
        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbSetPixelFormatMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }

        cl->format.bitsPerPixel = msg.spf.format.bitsPerPixel;
        cl->format.depth = msg.spf.format.depth;
        cl->format.bigEndian = (msg.spf.format.bigEndian ? TRUE : FALSE);
        cl->format.trueColour = (msg.spf.format.trueColour ? TRUE : FALSE);
        cl->format.redMax = Swap16IfLE(msg.spf.format.redMax);
        cl->format.greenMax = Swap16IfLE(msg.spf.format.greenMax);
        cl->format.blueMax = Swap16IfLE(msg.spf.format.blueMax);
        cl->format.redShift = msg.spf.format.redShift;
        cl->format.greenShift = msg.spf.format.greenShift;
        cl->format.blueShift = msg.spf.format.blueShift;

        cl->readyForSetColourMapEntries = TRUE;
        cl->screen->setTranslateFunction(cl);

        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetPixelFormatMsg, sz_rfbSetPixelFormatMsg);
        return;

    case rfbFixColourMapEntries:
        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbFixColourMapEntriesMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }
        /* record with the size of the message we actually read (was a
         * copy-paste of the SetPixelFormat constant before) */
        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbFixColourMapEntriesMsg, sz_rfbFixColourMapEntriesMsg);
        rfbLog("rfbProcessClientNormalMessage: %s",
                "FixColourMapEntries unsupported\n");
        rfbCloseClient(cl);
        return;

    /* NOTE: Some clients send us a set of encodings (ie: PointerPos) designed to enable/disable features...
     * We may want to look into this...
     * Example:
     *     case rfbEncodingXCursor:
     *         cl->enableCursorShapeUpdates = TRUE;
     *
     * Currently: cl->enableCursorShapeUpdates can *never* be turned off...
     */
    case rfbSetEncodings:
    {

        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbSetEncodingsMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }

        msg.se.nEncodings = Swap16IfLE(msg.se.nEncodings);

        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetEncodingsMsg+(msg.se.nEncodings*4),sz_rfbSetEncodingsMsg+(msg.se.nEncodings*4));

        /*
         * UltraVNC Client has the ability to adapt to changing network environments
         * So, let's give it a change to tell us what it wants now!
         */
        if (cl->preferredEncoding!=-1)
            lastPreferredEncoding = cl->preferredEncoding;

        /* Reset all flags to defaults (allows us to switch between PointerPos and Server Drawn Cursors) */
        cl->preferredEncoding=-1;
        cl->useCopyRect              = FALSE;
        cl->useNewFBSize             = FALSE;
        cl->useExtDesktopSize        = FALSE;
        cl->cursorWasChanged         = FALSE;
        cl->useRichCursorEncoding    = FALSE;
        cl->enableCursorPosUpdates   = FALSE;
        cl->enableCursorShapeUpdates = FALSE;
        cl->enableLastRectEncoding   = FALSE;
        cl->enableKeyboardLedState   = FALSE;
        cl->enableSupportedMessages  = FALSE;
        cl->enableSupportedEncodings = FALSE;
        cl->enableServerIdentity     = FALSE;
#if defined(LIBVNCSERVER_HAVE_LIBZ) || defined(LIBVNCSERVER_HAVE_LIBPNG)
        cl->tightQualityLevel        = -1;
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
        cl->tightCompressLevel       = TIGHT_DEFAULT_COMPRESSION;
        cl->turboSubsampLevel        = TURBO_DEFAULT_SUBSAMP;
        cl->turboQualityLevel        = -1;
#endif
#endif

        for (i = 0; i < msg.se.nEncodings; i++) {
            if ((n = rfbReadExact(cl, (char *)&enc, 4)) <= 0) {
                if (n != 0)
                    rfbLogPerror("rfbProcessClientNormalMessage: read");
                rfbCloseClient(cl);
                return;
            }
            enc = Swap32IfLE(enc);

            switch (enc) {

            case rfbEncodingCopyRect:
                cl->useCopyRect = TRUE;
                break;
            case rfbEncodingRaw:
            case rfbEncodingRRE:
            case rfbEncodingCoRRE:
            case rfbEncodingHextile:
            case rfbEncodingUltra:
#ifdef LIBVNCSERVER_HAVE_LIBZ
            case rfbEncodingZlib:
            case rfbEncodingZRLE:
            case rfbEncodingZYWRLE:
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
            case rfbEncodingTight:
#endif
#endif
#ifdef LIBVNCSERVER_HAVE_LIBPNG
            case rfbEncodingTightPng:
#endif
            /* The first supported encoding is the 'preferred' encoding */
                if (cl->preferredEncoding == -1)
                    cl->preferredEncoding = enc;
                break;
            case rfbEncodingXCursor:
                if(!cl->screen->dontConvertRichCursorToXCursor) {
                    rfbLog("Enabling X-style cursor updates for client %s\n",
                           cl->host);
                    /* if cursor was drawn, hide the cursor */
                    if(!cl->enableCursorShapeUpdates)
                        rfbRedrawAfterHideCursor(cl,NULL);

                    cl->enableCursorShapeUpdates = TRUE;
                    cl->cursorWasChanged = TRUE;
                }
                break;
            case rfbEncodingRichCursor:
                rfbLog("Enabling full-color cursor updates for client %s\n",
                       cl->host);
                /* if cursor was drawn, hide the cursor */
                if(!cl->enableCursorShapeUpdates)
                    rfbRedrawAfterHideCursor(cl,NULL);

                cl->enableCursorShapeUpdates = TRUE;
                cl->useRichCursorEncoding = TRUE;
                cl->cursorWasChanged = TRUE;
                break;
            case rfbEncodingPointerPos:
                if (!cl->enableCursorPosUpdates) {
                    rfbLog("Enabling cursor position updates for client %s\n",
                           cl->host);
                    cl->enableCursorPosUpdates = TRUE;
                    cl->cursorWasMoved = TRUE;
                }
                break;
            case rfbEncodingLastRect:
                if (!cl->enableLastRectEncoding) {
                    rfbLog("Enabling LastRect protocol extension for client "
                           "%s\n", cl->host);
                    cl->enableLastRectEncoding = TRUE;
                }
                break;
            case rfbEncodingNewFBSize:
                if (!cl->useNewFBSize) {
                    rfbLog("Enabling NewFBSize protocol extension for client "
                           "%s\n", cl->host);
                    cl->useNewFBSize = TRUE;
                }
                break;
            case rfbEncodingExtDesktopSize:
                if (!cl->useExtDesktopSize) {
                    rfbLog("Enabling ExtDesktopSize protocol extension for client "
                           "%s\n", cl->host);
                    cl->useExtDesktopSize = TRUE;
                    cl->useNewFBSize = TRUE;
                }
                break;
            case rfbEncodingKeyboardLedState:
                if (!cl->enableKeyboardLedState) {
                    rfbLog("Enabling KeyboardLedState protocol extension for client "
                           "%s\n", cl->host);
                    cl->enableKeyboardLedState = TRUE;
                }
                break;
            case rfbEncodingSupportedMessages:
                if (!cl->enableSupportedMessages) {
                    rfbLog("Enabling SupportedMessages protocol extension for client "
                           "%s\n", cl->host);
                    cl->enableSupportedMessages = TRUE;
                }
                break;
            case rfbEncodingSupportedEncodings:
                if (!cl->enableSupportedEncodings) {
                    rfbLog("Enabling SupportedEncodings protocol extension for client "
                           "%s\n", cl->host);
                    cl->enableSupportedEncodings = TRUE;
                }
                break;
            case rfbEncodingServerIdentity:
                if (!cl->enableServerIdentity) {
                    rfbLog("Enabling ServerIdentity protocol extension for client "
                           "%s\n", cl->host);
                    cl->enableServerIdentity = TRUE;
                }
                break;
            case rfbEncodingXvp:
                if (cl->screen->xvpHook) {
                    rfbLog("Enabling Xvp protocol extension for client "
                            "%s\n", cl->host);
                    if (!rfbSendXvp(cl, 1, rfbXvp_Init)) {
                        rfbCloseClient(cl);
                        return;
                    }
                }
                break;
            default:
#if defined(LIBVNCSERVER_HAVE_LIBZ) || defined(LIBVNCSERVER_HAVE_LIBPNG)
                if ( enc >= (uint32_t)rfbEncodingCompressLevel0 &&
                     enc <= (uint32_t)rfbEncodingCompressLevel9 ) {
                    cl->zlibCompressLevel = enc & 0x0F;
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
                    cl->tightCompressLevel = enc & 0x0F;
                    rfbLog("Using compression level %d for client %s\n",
                           cl->tightCompressLevel, cl->host);
#endif
                } else if ( enc >= (uint32_t)rfbEncodingQualityLevel0 &&
                            enc <= (uint32_t)rfbEncodingQualityLevel9 ) {
                    cl->tightQualityLevel = enc & 0x0F;
                    rfbLog("Using image quality level %d for client %s\n",
                           cl->tightQualityLevel, cl->host);
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
                    cl->turboQualityLevel = tight2turbo_qual[enc & 0x0F];
                    cl->turboSubsampLevel = tight2turbo_subsamp[enc & 0x0F];
                    rfbLog("Using JPEG subsampling %d, Q%d for client %s\n",
                           cl->turboSubsampLevel, cl->turboQualityLevel, cl->host);
                } else if ( enc >= (uint32_t)rfbEncodingFineQualityLevel0 + 1 &&
                            enc <= (uint32_t)rfbEncodingFineQualityLevel100 ) {
                    cl->turboQualityLevel = enc & 0xFF;
                    rfbLog("Using fine quality level %d for client %s\n",
                           cl->turboQualityLevel, cl->host);
                } else if ( enc >= (uint32_t)rfbEncodingSubsamp1X &&
                            enc <= (uint32_t)rfbEncodingSubsampGray ) {
                    cl->turboSubsampLevel = enc & 0xFF;
                    rfbLog("Using subsampling level %d for client %s\n",
                           cl->turboSubsampLevel, cl->host);
#endif
                } else
#endif
                {
                    rfbExtensionData* e;
                    for(e = cl->extensions; e;) {
                        rfbExtensionData* next = e->next;
                        if(e->extension->enablePseudoEncoding &&
                           e->extension->enablePseudoEncoding(cl,
                                                              &e->data, (int)enc))
                            /* ext handles this encoding */
                            break;
                        e = next;
                    }
                    if(e == NULL) {
                        rfbBool handled = FALSE;
                        /* if the pseudo encoding is not handled by the
                           enabled extensions, search through all
                           extensions.  (renamed from 'e' to avoid shadowing
                           the rfbExtensionData pointer above) */
                        rfbProtocolExtension* pe;

                        for(pe = rfbGetExtensionIterator(); pe;) {
                            int* encs = pe->pseudoEncodings;
                            while(encs && *encs!=0) {
                                if(*encs==(int)enc) {
                                    void* data = NULL;
                                    if(!pe->enablePseudoEncoding(cl, &data, (int)enc)) {
                                        rfbLog("Installed extension pretends to handle pseudo encoding 0x%x, but does not!\n",(int)enc);
                                    } else {
                                        rfbEnableExtension(cl, pe, data);
                                        handled = TRUE;
                                        pe = NULL;
                                        break;
                                    }
                                }
                                encs++;
                            }

                            if(pe)
                                pe = pe->next;
                        }
                        rfbReleaseExtensionIterator();

                        if(!handled)
                            rfbLog("rfbProcessClientNormalMessage: "
                                   "ignoring unsupported encoding type %s\n",
                                   encodingName(enc,encBuf,sizeof(encBuf)));
                    }
                }
            }
        }



        if (cl->preferredEncoding == -1) {
            if (lastPreferredEncoding==-1) {
                cl->preferredEncoding = rfbEncodingRaw;
                rfbLog("Defaulting to %s encoding for client %s\n", encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)),cl->host);
            }
            else {
                cl->preferredEncoding = lastPreferredEncoding;
                rfbLog("Sticking with %s encoding for client %s\n", encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)),cl->host);
            }
        }
        else
        {
          if (lastPreferredEncoding==-1) {
              rfbLog("Using %s encoding for client %s\n", encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)),cl->host);
          } else {
              rfbLog("Switching from %s to %s Encoding for client %s\n",
                  encodingName(lastPreferredEncoding,encBuf2,sizeof(encBuf2)),
                  encodingName(cl->preferredEncoding,encBuf,sizeof(encBuf)), cl->host);
          }
        }

        if (cl->enableCursorPosUpdates && !cl->enableCursorShapeUpdates) {
            rfbLog("Disabling cursor position updates for client %s\n",
                   cl->host);
            cl->enableCursorPosUpdates = FALSE;
        }

        return;
    }


    case rfbFramebufferUpdateRequest:
    {
        sraRegionPtr tmpRegion;

        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbFramebufferUpdateRequestMsg-1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }

        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbFramebufferUpdateRequestMsg,sz_rfbFramebufferUpdateRequestMsg);

        /* The values come in based on the scaled screen, we need to convert them to
         * values based on the main screen's coordinate system
         */
        if(!rectSwapIfLEAndClip(&msg.fur.x,&msg.fur.y,&msg.fur.w,&msg.fur.h,cl))
        {
            rfbLog("Warning, ignoring rfbFramebufferUpdateRequest: %dXx%dY-%dWx%dH\n",msg.fur.x, msg.fur.y, msg.fur.w, msg.fur.h);
            return;
        }

        if (cl->clientFramebufferUpdateRequestHook)
            cl->clientFramebufferUpdateRequestHook(cl, &msg.fur);

        tmpRegion =
            sraRgnCreateRect(msg.fur.x,
                             msg.fur.y,
                             msg.fur.x+msg.fur.w,
                             msg.fur.y+msg.fur.h);

        LOCK(cl->updateMutex);
        sraRgnOr(cl->requestedRegion,tmpRegion);

        if (!cl->readyForSetColourMapEntries) {
            /* client hasn't sent a SetPixelFormat so is using server's */
            cl->readyForSetColourMapEntries = TRUE;
            if (!cl->format.trueColour) {
                if (!rfbSetClientColourMap(cl, 0, 0)) {
                    sraRgnDestroy(tmpRegion);
                    TSIGNAL(cl->updateCond);
                    UNLOCK(cl->updateMutex);
                    return;
                }
            }
        }

        if (!msg.fur.incremental) {
            sraRgnOr(cl->modifiedRegion,tmpRegion);
            sraRgnSubtract(cl->copyRegion,tmpRegion);
            if (cl->useExtDesktopSize)
                cl->newFBSizePending = TRUE;
        }
        TSIGNAL(cl->updateCond);
        UNLOCK(cl->updateMutex);

        sraRgnDestroy(tmpRegion);

        return;
    }

    case rfbKeyEvent:

        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbKeyEventMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }

        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbKeyEventMsg, sz_rfbKeyEventMsg);

        if(!cl->viewOnly) {
            cl->screen->kbdAddEvent(msg.ke.down, (rfbKeySym)Swap32IfLE(msg.ke.key), cl);
        }

        return;


    case rfbPointerEvent:

        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbPointerEventMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }

        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbPointerEventMsg, sz_rfbPointerEventMsg);

        /* only one client at a time may drag the pointer */
        if (cl->screen->pointerClient && cl->screen->pointerClient != cl)
            return;

        if (msg.pe.buttonMask == 0)
            cl->screen->pointerClient = NULL;
        else
            cl->screen->pointerClient = cl;

        if(!cl->viewOnly) {
            if (msg.pe.buttonMask != cl->lastPtrButtons ||
                    cl->screen->deferPtrUpdateTime == 0) {
                cl->screen->ptrAddEvent(msg.pe.buttonMask,
                        ScaleX(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.x)),
                        ScaleY(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.y)),
                        cl);
                cl->lastPtrButtons = msg.pe.buttonMask;
            } else {
                cl->lastPtrX = ScaleX(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.x));
                cl->lastPtrY = ScaleY(cl->scaledScreen, cl->screen, Swap16IfLE(msg.pe.y));
                cl->lastPtrButtons = msg.pe.buttonMask;
            }
        }
        return;


    case rfbFileTransfer:
        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbFileTransferMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }
        msg.ft.size = Swap32IfLE(msg.ft.size);
        msg.ft.length = Swap32IfLE(msg.ft.length);
        /* record statistics in rfbProcessFileTransfer as length is filled with garbage when it is not valid */
        rfbProcessFileTransfer(cl, msg.ft.contentType, msg.ft.contentParam, msg.ft.size, msg.ft.length);
        return;

    case rfbSetSW:
        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbSetSWMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }
        msg.sw.x = Swap16IfLE(msg.sw.x);
        msg.sw.y = Swap16IfLE(msg.sw.y);
        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetSWMsg, sz_rfbSetSWMsg);
        /* msg.sw.status is not initialized in the ultraVNC viewer and contains random numbers (why???) */

        rfbLog("Received a rfbSetSingleWindow(%d x, %d y)\n", msg.sw.x, msg.sw.y);
        if (cl->screen->setSingleWindow!=NULL)
            cl->screen->setSingleWindow(cl, msg.sw.x, msg.sw.y);
        return;

    case rfbSetServerInput:
        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbSetServerInputMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }
        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetServerInputMsg, sz_rfbSetServerInputMsg);

        /* msg.sim.pad is not initialized in the ultraVNC viewer and contains random numbers (why???) */
        /* msg.sim.pad = Swap16IfLE(msg.sim.pad); */

        rfbLog("Received a rfbSetServerInput(%d status)\n", msg.sim.status);
        if (cl->screen->setServerInput!=NULL)
            cl->screen->setServerInput(cl, msg.sim.status);
        return;

    case rfbTextChat:
        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbTextChatMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }

        msg.tc.pad2   = Swap16IfLE(msg.tc.pad2);
        msg.tc.length = Swap32IfLE(msg.tc.length);

        switch (msg.tc.length) {
        case rfbTextChatOpen:
        case rfbTextChatClose:
        case rfbTextChatFinished:
            /* commands do not have text following */
            /* Why couldn't they have used the pad byte??? */
            str=NULL;
            rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbTextChatMsg, sz_rfbTextChatMsg);
            break;
        default:
            if ((msg.tc.length>0) && (msg.tc.length<rfbTextMaxSize))
            {
                str = (char *)malloc(msg.tc.length);
                if (str==NULL)
                {
                    rfbLog("Unable to malloc %d bytes for a TextChat Message\n", msg.tc.length);
                    rfbCloseClient(cl);
                    return;
                }
                if ((n = rfbReadExact(cl, str, msg.tc.length)) <= 0) {
                    if (n != 0)
                        rfbLogPerror("rfbProcessClientNormalMessage: read");
                    free(str);
                    rfbCloseClient(cl);
                    return;
                }
                rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbTextChatMsg+msg.tc.length, sz_rfbTextChatMsg+msg.tc.length);
            }
            else
            {
                /* This should never happen */
                rfbLog("client sent us a Text Message that is too big %d>%d\n", msg.tc.length, rfbTextMaxSize);
                rfbCloseClient(cl);
                return;
            }
        }

        /* Note: length can be commands: rfbTextChatOpen, rfbTextChatClose, and rfbTextChatFinished
         * at which point, the str is NULL (as it is not sent)
         */
        if (cl->screen->setTextChat!=NULL)
            cl->screen->setTextChat(cl, msg.tc.length, str);

        free(str);
        return;


    case rfbClientCutText:

        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbClientCutTextMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }

        msg.cct.length = Swap32IfLE(msg.cct.length);

        /* uint32_t input is passed to malloc()'s size_t argument,
         * to rfbReadExact()'s int argument, to rfbStatRecordMessageRcvd()'s int
         * argument increased of sz_rfbClientCutTextMsg, and to setXCutText()'s int
         * argument. Here we impose a limit of 1 MB so that the value fits
         * into all of the types to prevent from misinterpretation and thus
         * from accessing uninitialized memory (CVE-2018-7225) and also to
         * prevent from a denial-of-service by allocating too much memory in
         * the server. */
        if (msg.cct.length > 1<<20) {
            rfbLog("rfbClientCutText: too big cut text length requested: %u B > 1 MB\n", (unsigned int)msg.cct.length);
            rfbCloseClient(cl);
            return;
        }

        /* Allow zero-length client cut text. */
        str = (char *)calloc(msg.cct.length ? msg.cct.length : 1, 1);
        if (str == NULL) {
            rfbLogPerror("rfbProcessClientNormalMessage: not enough memory");
            rfbCloseClient(cl);
            return;
        }

        if ((n = rfbReadExact(cl, str, msg.cct.length)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            free(str);
            rfbCloseClient(cl);
            return;
        }

        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbClientCutTextMsg+msg.cct.length, sz_rfbClientCutTextMsg+msg.cct.length);

        if(!cl->viewOnly) {
            cl->screen->setXCutText(str, msg.cct.length, cl);
        }
        free(str);

        return;

    case rfbPalmVNCSetScaleFactor:
        cl->PalmVNC = TRUE;
        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbSetScaleMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }

        if (msg.ssc.scale == 0) {
            rfbLogPerror("rfbProcessClientNormalMessage: will not accept a scale factor of zero");
            rfbCloseClient(cl);
            return;
        }

        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetScaleMsg, sz_rfbSetScaleMsg);
        rfbLog("rfbSetScale(%d)\n", msg.ssc.scale);
        rfbScalingSetup(cl,cl->screen->width/msg.ssc.scale, cl->screen->height/msg.ssc.scale);

        rfbSendNewScaleSize(cl);
        return;

    case rfbSetScale:

        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbSetScaleMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }

        if (msg.ssc.scale == 0) {
            rfbLogPerror("rfbProcessClientNormalMessage: will not accept a scale factor of zero");
            rfbCloseClient(cl);
            return;
        }

        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetScaleMsg, sz_rfbSetScaleMsg);
        rfbLog("rfbSetScale(%d)\n", msg.ssc.scale);
        rfbScalingSetup(cl,cl->screen->width/msg.ssc.scale, cl->screen->height/msg.ssc.scale);

        rfbSendNewScaleSize(cl);
        return;

    case rfbXvp:

        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbXvpMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }
        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbXvpMsg, sz_rfbXvpMsg);

        /* only version 1 is defined, so echo back a fail for anything else */
        if(msg.xvp.version != 1) {
            rfbSendXvp(cl, msg.xvp.version, rfbXvp_Fail);
        }
        else {
            /* if the hook exists and fails, send a fail msg */
            if(cl->screen->xvpHook && !cl->screen->xvpHook(cl, msg.xvp.version, msg.xvp.code))
                rfbSendXvp(cl, 1, rfbXvp_Fail);
        }
        return;

    case rfbSetDesktopSize:

        if ((n = rfbReadExact(cl, ((char *)&msg) + 1,
                              sz_rfbSetDesktopSizeMsg - 1)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            rfbCloseClient(cl);
            return;
        }

        if (msg.sdm.numberOfScreens == 0) {
            rfbLog("Ignoring setDesktopSize message from client that defines zero screens\n");
            return;
        }

        extDesktopScreens = (rfbExtDesktopScreen *) malloc(msg.sdm.numberOfScreens * sz_rfbExtDesktopScreen);
        if (extDesktopScreens == NULL) {
            rfbLogPerror("rfbProcessClientNormalMessage: not enough memory");
            rfbCloseClient(cl);
            return;
        }

        if ((n = rfbReadExact(cl, ((char *)extDesktopScreens), msg.sdm.numberOfScreens * sz_rfbExtDesktopScreen)) <= 0) {
            if (n != 0)
                rfbLogPerror("rfbProcessClientNormalMessage: read");
            free(extDesktopScreens);
            rfbCloseClient(cl);
            return;
        }
        rfbStatRecordMessageRcvd(cl, msg.type, sz_rfbSetDesktopSizeMsg + msg.sdm.numberOfScreens * sz_rfbExtDesktopScreen,
                                 sz_rfbSetDesktopSizeMsg + msg.sdm.numberOfScreens * sz_rfbExtDesktopScreen);

        for (i=0; i < msg.sdm.numberOfScreens; i++) {
            extDesktopScreens[i].id = Swap32IfLE(extDesktopScreens[i].id);
            extDesktopScreens[i].x = Swap16IfLE(extDesktopScreens[i].x);
            extDesktopScreens[i].y = Swap16IfLE(extDesktopScreens[i].y);
            extDesktopScreens[i].width = Swap16IfLE(extDesktopScreens[i].width);
            extDesktopScreens[i].height = Swap16IfLE(extDesktopScreens[i].height);
            extDesktopScreens[i].flags = Swap32IfLE(extDesktopScreens[i].flags);
        }
        msg.sdm.width = Swap16IfLE(msg.sdm.width);
        msg.sdm.height = Swap16IfLE(msg.sdm.height);

        rfbLog("Client requested resolution change to (%dx%d)\n", msg.sdm.width, msg.sdm.height);
        cl->requestedDesktopSizeChange = rfbExtDesktopSize_ClientRequestedChange;
        cl->lastDesktopSizeChangeError = cl->screen->setDesktopSizeHook(msg.sdm.width, msg.sdm.height, msg.sdm.numberOfScreens,
                                                                        extDesktopScreens, cl);

        if (cl->lastDesktopSizeChangeError == 0) {
            /* Let other clients know it was this client that requested the change */
            iterator = rfbGetClientIterator(cl->screen);
            while ((clp = rfbClientIteratorNext(iterator)) != NULL) {
                LOCK(clp->updateMutex);
                if (clp != cl)
                    clp->requestedDesktopSizeChange = rfbExtDesktopSize_OtherClientRequestedChange;
                UNLOCK(clp->updateMutex);
            }
        }
        else
        {
            /* Force ExtendedDesktopSize message to be sent with result code in case of error.
               (In case of success, it is delayed until the new framebuffer is created) */
            cl->newFBSizePending = TRUE;
        }

        free(extDesktopScreens);
        return;

    default:
        {
            rfbExtensionData *e,*next;

            for(e=cl->extensions; e;) {
                next = e->next;
                if(e->extension->handleMessage &&
                        e->extension->handleMessage(cl, e->data, &msg))
                {
                    rfbStatRecordMessageRcvd(cl, msg.type, 0, 0); /* Extension should handle this */
                    return;
                }
                e = next;
            }

            rfbLog("rfbProcessClientNormalMessage: unknown message type %d\n",
                    msg.type);
            rfbLog(" ... closing connection\n");
            rfbCloseClient(cl);
            return;
        }
    }
}
/*
 * rfbSendFramebufferUpdate - send the currently pending framebuffer update to
 * the RFB client.
 * givenUpdateRegion is not changed.
 *
 * Returns TRUE on success, FALSE if any send failed.  On every return path,
 * cl->screen->displayFinishedHook (if set) is invoked with the result.
 */

rfbBool
rfbSendFramebufferUpdate(rfbClientPtr cl,
                         sraRegionPtr givenUpdateRegion)
{
    sraRectangleIterator* i=NULL;
    sraRect rect;
    int nUpdateRegionRects;
    /* The framebuffer-update header is built in place at the start of the
       per-client update buffer. */
    rfbFramebufferUpdateMsg *fu = (rfbFramebufferUpdateMsg *)cl->updateBuf;
    sraRegionPtr updateRegion,updateCopyRegion,tmpRegion;
    int dx, dy;
    rfbBool sendCursorShape = FALSE;
    rfbBool sendCursorPos = FALSE;
    rfbBool sendKeyboardLedState = FALSE;
    rfbBool sendSupportedMessages = FALSE;
    rfbBool sendSupportedEncodings = FALSE;
    rfbBool sendServerIdentity = FALSE;
    rfbBool result = TRUE;

    if(cl->screen->displayHook)
        cl->screen->displayHook(cl);

    /*
     * If framebuffer size was changed and the client supports NewFBSize
     * encoding, just send NewFBSize marker and return.
     */

    if (cl->useNewFBSize && cl->newFBSizePending) {
        LOCK(cl->updateMutex);
        cl->newFBSizePending = FALSE;
        UNLOCK(cl->updateMutex);

        fu->type = rfbFramebufferUpdate;
        fu->nRects = Swap16IfLE(1);
        cl->ublen = sz_rfbFramebufferUpdateMsg;

        /* ExtendedDesktopSize takes precedence over plain NewFBSize when the
           client negotiated it. */
        if (cl->useExtDesktopSize) {
            if (!rfbSendExtDesktopSize(cl, cl->scaledScreen->width, cl->scaledScreen->height)) {
                if(cl->screen->displayFinishedHook)
                    cl->screen->displayFinishedHook(cl, FALSE);
                return FALSE;
            }
        }
        else if (!rfbSendNewFBSize(cl, cl->scaledScreen->width, cl->scaledScreen->height)) {
            if(cl->screen->displayFinishedHook)
                cl->screen->displayFinishedHook(cl, FALSE);
            return FALSE;
        }
        result = rfbSendUpdateBuf(cl);
        if(cl->screen->displayFinishedHook)
            cl->screen->displayFinishedHook(cl, result);
        return result;
    }

    /*
     * If this client understands cursor shape updates, cursor should be
     * removed from the framebuffer. Otherwise, make sure it's put up.
     */

    if (cl->enableCursorShapeUpdates) {
        if (cl->cursorWasChanged && cl->readyForSetColourMapEntries)
            sendCursorShape = TRUE;
    }

    /*
     * Do we plan to send cursor position update?
     */

    if (cl->enableCursorPosUpdates && cl->cursorWasMoved)
        sendCursorPos = TRUE;

    /*
     * Do we plan to send a keyboard state update?
     */

    if ((cl->enableKeyboardLedState) &&
        (cl->screen->getKeyboardLedStateHook!=NULL))
    {
        int x;
        x=cl->screen->getKeyboardLedStateHook(cl->screen);
        /* Only send when the LED state actually changed since last time. */
        if (x!=cl->lastKeyboardLedState)
        {
            sendKeyboardLedState = TRUE;
            cl->lastKeyboardLedState=x;
        }
    }

    /*
     * Do we plan to send a rfbEncodingSupportedMessages?
     */

    if (cl->enableSupportedMessages)
    {
        sendSupportedMessages = TRUE;
        /* We only send this message ONCE <per setEncodings message received>
         * (We disable it here)
         */
        cl->enableSupportedMessages = FALSE;
    }

    /*
     * Do we plan to send a rfbEncodingSupportedEncodings?
     */

    if (cl->enableSupportedEncodings)
    {
        sendSupportedEncodings = TRUE;
        /* We only send this message ONCE <per setEncodings message received>
         * (We disable it here)
         */
        cl->enableSupportedEncodings = FALSE;
    }

    /*
     * Do we plan to send a rfbEncodingServerIdentity?
     */

    if (cl->enableServerIdentity)
    {
        sendServerIdentity = TRUE;
        /* We only send this message ONCE <per setEncodings message received>
         * (We disable it here)
         */
        cl->enableServerIdentity = FALSE;
    }

    LOCK(cl->updateMutex);

    /*
     * The modifiedRegion may overlap the destination copyRegion. We remove
     * any overlapping bits from the copyRegion (since they'd only be
     * overwritten anyway).
     */

    sraRgnSubtract(cl->copyRegion,cl->modifiedRegion);

    /*
     * The client is interested in the region requestedRegion. The region
     * which should be updated now is the intersection of requestedRegion
     * and the union of modifiedRegion and copyRegion. If it's empty then
     * no update is needed.
     */

    updateRegion = sraRgnCreateRgn(givenUpdateRegion);

    /* Progressive mode: restrict each update to a horizontal slice of the
       bounding box, advancing the slice on every call. */
    if(cl->screen->progressiveSliceHeight>0) {
        int height=cl->screen->progressiveSliceHeight,
            y=cl->progressiveSliceY;
        sraRegionPtr bbox=sraRgnBBox(updateRegion);
        sraRect rect;
        if(sraRgnPopRect(bbox,&rect,0)) {
            sraRegionPtr slice;
            if(y<rect.y1 || y>=rect.y2)
                y=rect.y1;
            slice=sraRgnCreateRect(0,y,cl->screen->width,y+height);
            sraRgnAnd(updateRegion,slice);
            sraRgnDestroy(slice);
        }
        sraRgnDestroy(bbox);
        y+=height;
        if(y>=cl->screen->height)
            y=0;
        cl->progressiveSliceY=y;
    }

    sraRgnOr(updateRegion,cl->copyRegion);
    /* Early exit when there is nothing at all to send (no pixels, no cursor
       movement for server-side-cursor clients, no pseudo-rectangles). */
    if(!sraRgnAnd(updateRegion,cl->requestedRegion) &&
       sraRgnEmpty(updateRegion) &&
       (cl->enableCursorShapeUpdates ||
        (cl->cursorX == cl->screen->cursorX && cl->cursorY == cl->screen->cursorY)) &&
       !sendCursorShape && !sendCursorPos && !sendKeyboardLedState &&
       !sendSupportedMessages && !sendSupportedEncodings && !sendServerIdentity) {
        sraRgnDestroy(updateRegion);
        UNLOCK(cl->updateMutex);
        if(cl->screen->displayFinishedHook)
            cl->screen->displayFinishedHook(cl, TRUE);
        return TRUE;
    }

    /*
     * We assume that the client doesn't have any pixel data outside the
     * requestedRegion. In other words, both the source and destination of a
     * copy must lie within requestedRegion. So the region we can send as a
     * copy is the intersection of the copyRegion with both the requestedRegion
     * and the requestedRegion translated by the amount of the copy. We set
     * updateCopyRegion to this.
     */

    updateCopyRegion = sraRgnCreateRgn(cl->copyRegion);
    sraRgnAnd(updateCopyRegion,cl->requestedRegion);
    tmpRegion = sraRgnCreateRgn(cl->requestedRegion);
    sraRgnOffset(tmpRegion,cl->copyDX,cl->copyDY);
    sraRgnAnd(updateCopyRegion,tmpRegion);
    sraRgnDestroy(tmpRegion);
    dx = cl->copyDX;
    dy = cl->copyDY;

    /*
     * Next we remove updateCopyRegion from updateRegion so that updateRegion
     * is the part of this update which is sent as ordinary pixel data (i.e not
     * a copy).
     */

    sraRgnSubtract(updateRegion,updateCopyRegion);

    /*
     * Finally we leave modifiedRegion to be the remainder (if any) of parts of
     * the screen which are modified but outside the requestedRegion. We also
     * empty both the requestedRegion and the copyRegion - note that we never
     * carry over a copyRegion for a future update.
     */

    sraRgnOr(cl->modifiedRegion,cl->copyRegion);
    sraRgnSubtract(cl->modifiedRegion,updateRegion);
    sraRgnSubtract(cl->modifiedRegion,updateCopyRegion);

    sraRgnMakeEmpty(cl->requestedRegion);
    sraRgnMakeEmpty(cl->copyRegion);
    cl->copyDX = 0;
    cl->copyDY = 0;

    UNLOCK(cl->updateMutex);

    /* Server-side cursor: repaint the areas the cursor vacated/entered and
       draw it at the new position before encoding. */
    if (!cl->enableCursorShapeUpdates) {
        if(cl->cursorX != cl->screen->cursorX || cl->cursorY != cl->screen->cursorY) {
            rfbRedrawAfterHideCursor(cl,updateRegion);
            LOCK(cl->screen->cursorMutex);
            cl->cursorX = cl->screen->cursorX;
            cl->cursorY = cl->screen->cursorY;
            UNLOCK(cl->screen->cursorMutex);
            rfbRedrawAfterHideCursor(cl,updateRegion);
        }
        rfbShowCursor(cl);
    }

    /*
     * Now send the update.
     */

    rfbStatRecordMessageSent(cl, rfbFramebufferUpdate, 0, 0);
    /* Encodings that split rectangles internally need a per-encoding count of
       how many wire rectangles the region will actually produce. */
    if (cl->preferredEncoding == rfbEncodingCoRRE) {
        nUpdateRegionRects = 0;

        for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){
            int x = rect.x1;
            int y = rect.y1;
            int w = rect.x2 - x;
            int h = rect.y2 - y;
            int rectsPerRow, rows;
            /* We need to count the number of rects in the scaled screen */
            if (cl->screen!=cl->scaledScreen)
                rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate");
            rectsPerRow = (w-1)/cl->correMaxWidth+1;
            rows = (h-1)/cl->correMaxHeight+1;
            nUpdateRegionRects += rectsPerRow*rows;
        }
        sraRgnReleaseIterator(i); i=NULL;
    } else if (cl->preferredEncoding == rfbEncodingUltra) {
        nUpdateRegionRects = 0;

        for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){
            int x = rect.x1;
            int y = rect.y1;
            int w = rect.x2 - x;
            int h = rect.y2 - y;
            /* We need to count the number of rects in the scaled screen */
            if (cl->screen!=cl->scaledScreen)
                rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate");
            nUpdateRegionRects += (((h-1) / (ULTRA_MAX_SIZE( w ) / w)) + 1);
        }
        sraRgnReleaseIterator(i); i=NULL;
#ifdef LIBVNCSERVER_HAVE_LIBZ
    } else if (cl->preferredEncoding == rfbEncodingZlib) {
        nUpdateRegionRects = 0;

        for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){
            int x = rect.x1;
            int y = rect.y1;
            int w = rect.x2 - x;
            int h = rect.y2 - y;
            /* We need to count the number of rects in the scaled screen */
            if (cl->screen!=cl->scaledScreen)
                rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate");
            nUpdateRegionRects += (((h-1) / (ZLIB_MAX_SIZE( w ) / w)) + 1);
        }
        sraRgnReleaseIterator(i); i=NULL;
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
    } else if (cl->preferredEncoding == rfbEncodingTight) {
        nUpdateRegionRects = 0;

        for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){
            int x = rect.x1;
            int y = rect.y1;
            int w = rect.x2 - x;
            int h = rect.y2 - y;
            int n;
            /* We need to count the number of rects in the scaled screen */
            if (cl->screen!=cl->scaledScreen)
                rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate");
            n = rfbNumCodedRectsTight(cl, x, y, w, h);
            /* n == 0 means "unknown": fall back to the LastRect protocol
               (nRects = 0xFFFF plus a terminating LastRect marker). */
            if (n == 0) {
                nUpdateRegionRects = 0xFFFF;
                break;
            }
            nUpdateRegionRects += n;
        }
        sraRgnReleaseIterator(i); i=NULL;
#endif
#endif
#if defined(LIBVNCSERVER_HAVE_LIBJPEG) && defined(LIBVNCSERVER_HAVE_LIBPNG)
    } else if (cl->preferredEncoding == rfbEncodingTightPng) {
        nUpdateRegionRects = 0;

        for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){
            int x = rect.x1;
            int y = rect.y1;
            int w = rect.x2 - x;
            int h = rect.y2 - y;
            int n;
            /* We need to count the number of rects in the scaled screen */
            if (cl->screen!=cl->scaledScreen)
                rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate");
            n = rfbNumCodedRectsTight(cl, x, y, w, h);
            if (n == 0) {
                nUpdateRegionRects = 0xFFFF;
                break;
            }
            nUpdateRegionRects += n;
        }
        sraRgnReleaseIterator(i); i=NULL;
#endif
    } else {
        nUpdateRegionRects = sraRgnCountRects(updateRegion);
    }

    fu->type = rfbFramebufferUpdate;
    if (nUpdateRegionRects != 0xFFFF) {
        /* If too many rectangles would be sent, collapse the region to its
           bounding box (only for encodings that count rects 1:1). */
        if(cl->screen->maxRectsPerUpdate>0
           /* CoRRE splits the screen into smaller squares */
           && cl->preferredEncoding != rfbEncodingCoRRE
           /* Ultra encoding splits rectangles up into smaller chunks */
           && cl->preferredEncoding != rfbEncodingUltra
#ifdef LIBVNCSERVER_HAVE_LIBZ
           /* Zlib encoding splits rectangles up into smaller chunks */
           && cl->preferredEncoding != rfbEncodingZlib
#ifdef LIBVNCSERVER_HAVE_LIBJPEG
           /* Tight encoding counts the rectangles differently */
           && cl->preferredEncoding != rfbEncodingTight
#endif
#endif
#ifdef LIBVNCSERVER_HAVE_LIBPNG
           /* Tight encoding counts the rectangles differently */
           && cl->preferredEncoding != rfbEncodingTightPng
#endif
           && nUpdateRegionRects>cl->screen->maxRectsPerUpdate) {
            sraRegion* newUpdateRegion = sraRgnBBox(updateRegion);
            sraRgnDestroy(updateRegion);
            updateRegion = newUpdateRegion;
            nUpdateRegionRects = sraRgnCountRects(updateRegion);
        }
        fu->nRects = Swap16IfLE((uint16_t)(sraRgnCountRects(updateCopyRegion) +
                                           nUpdateRegionRects +
                                           !!sendCursorShape + !!sendCursorPos + !!sendKeyboardLedState +
                                           !!sendSupportedMessages + !!sendSupportedEncodings + !!sendServerIdentity));
    } else {
        fu->nRects = 0xFFFF;
    }
    cl->ublen = sz_rfbFramebufferUpdateMsg;

    /* Pseudo-rectangles first, then CopyRect, then ordinary pixel data. */
    if (sendCursorShape) {
        cl->cursorWasChanged = FALSE;
        if (!rfbSendCursorShape(cl))
            goto updateFailed;
    }

    if (sendCursorPos) {
        cl->cursorWasMoved = FALSE;
        if (!rfbSendCursorPos(cl))
            goto updateFailed;
    }

    if (sendKeyboardLedState) {
        if (!rfbSendKeyboardLedState(cl))
            goto updateFailed;
    }

    if (sendSupportedMessages) {
        if (!rfbSendSupportedMessages(cl))
            goto updateFailed;
    }

    if (sendSupportedEncodings) {
        if (!rfbSendSupportedEncodings(cl))
            goto updateFailed;
    }

    if (sendServerIdentity) {
        if (!rfbSendServerIdentity(cl))
            goto updateFailed;
    }

    if (!sraRgnEmpty(updateCopyRegion)) {
        if (!rfbSendCopyRegion(cl,updateCopyRegion,dx,dy))
            goto updateFailed;
    }

    for(i = sraRgnGetIterator(updateRegion); sraRgnIteratorNext(i,&rect);){
        int x = rect.x1;
        int y = rect.y1;
        int w = rect.x2 - x;
        int h = rect.y2 - y;

        /* We need to count the number of rects in the scaled screen */
        if (cl->screen!=cl->scaledScreen)
            rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "rfbSendFramebufferUpdate");

        switch (cl->preferredEncoding) {
        case -1:
        case rfbEncodingRaw:
            if (!rfbSendRectEncodingRaw(cl, x, y, w, h))
                goto updateFailed;
            break;
        case rfbEncodingRRE:
            if (!rfbSendRectEncodingRRE(cl, x, y, w, h))
                goto updateFailed;
            break;
        case rfbEncodingCoRRE:
            if (!rfbSendRectEncodingCoRRE(cl, x, y, w, h))
                goto updateFailed;
            break;
        case rfbEncodingHextile:
            if (!rfbSendRectEncodingHextile(cl, x, y, w, h))
                goto updateFailed;
            break;
        case rfbEncodingUltra:
            if (!rfbSendRectEncodingUltra(cl, x, y, w, h))
                goto updateFailed;
            break;
#ifdef LIBVNCSERVER_HAVE_LIBZ
        case rfbEncodingZlib:
            if (!rfbSendRectEncodingZlib(cl, x, y, w, h))
                goto updateFailed;
            break;
        case rfbEncodingZRLE:
        case rfbEncodingZYWRLE:
            if (!rfbSendRectEncodingZRLE(cl, x, y, w, h))
                goto updateFailed;
            break;
#endif
#if defined(LIBVNCSERVER_HAVE_LIBJPEG) && (defined(LIBVNCSERVER_HAVE_LIBZ) || defined(LIBVNCSERVER_HAVE_LIBPNG))
        case rfbEncodingTight:
            if (!rfbSendRectEncodingTight(cl, x, y, w, h))
                goto updateFailed;
            break;
#ifdef LIBVNCSERVER_HAVE_LIBPNG
        case rfbEncodingTightPng:
            if (!rfbSendRectEncodingTightPng(cl, x, y, w, h))
                goto updateFailed;
            break;
#endif
#endif
        }
    }
    if (i) {
        sraRgnReleaseIterator(i);
        i = NULL;
    }

    /* An open-ended update (nRects == 0xFFFF) must be terminated with a
       LastRect marker. */
    if ( nUpdateRegionRects == 0xFFFF &&
         !rfbSendLastRectMarker(cl) )
        goto updateFailed;

    if (!rfbSendUpdateBuf(cl)) {
updateFailed:
        result = FALSE;
    }

    if (!cl->enableCursorShapeUpdates) {
        rfbHideCursor(cl);
    }

    if(i)
        sraRgnReleaseIterator(i);
    sraRgnDestroy(updateRegion);
    sraRgnDestroy(updateCopyRegion);

    if(cl->screen->displayFinishedHook)
        cl->screen->displayFinishedHook(cl, result);
    return result;
}
/*
 * Send the copy region as a string of CopyRect encoded rectangles.
 * The only slightly tricky thing is that we should send the messages in
 * the correct order so that an earlier CopyRect will not corrupt the source
 * of a later one.
 *
 * Returns TRUE on success, FALSE if flushing the update buffer failed.
 */

rfbBool
rfbSendCopyRegion(rfbClientPtr cl,
                  sraRegionPtr reg,
                  int dx,
                  int dy)
{
    int x, y, w, h;
    rfbFramebufferUpdateRectHeader rect;
    rfbCopyRect cr;
    sraRectangleIterator* i;
    sraRect rect1;

    /* printf("copyrect: "); sraRgnPrint(reg); putchar('\n');fflush(stdout); */
    /* Iterate in an order that guarantees an earlier CopyRect never
       overwrites the source pixels of a later one. */
    i = sraRgnGetReverseIterator(reg,dx>0,dy>0);

    /* correct for the scale of the screen */
    dx = ScaleX(cl->screen, cl->scaledScreen, dx);
    /* NOTE(review): the vertical offset is scaled with ScaleX (width ratio)
       rather than ScaleY -- harmless for uniform scaling, but looks wrong
       for non-uniform scale factors; confirm against scale.h. */
    dy = ScaleX(cl->screen, cl->scaledScreen, dy);

    while(sraRgnIteratorNext(i,&rect1)) {

        /* Flush the update buffer if this rectangle would not fit; without
           this check a large region overflows cl->updateBuf. */
        if (cl->ublen + sz_rfbFramebufferUpdateRectHeader
            + sz_rfbCopyRect > UPDATE_BUF_SIZE) {
            if (!rfbSendUpdateBuf(cl)) {
                sraRgnReleaseIterator(i);
                return FALSE;
            }
        }

        x = rect1.x1;
        y = rect1.y1;
        w = rect1.x2 - x;
        h = rect1.y2 - y;

        /* correct for scaling (if necessary) */
        rfbScaledCorrection(cl->screen, cl->scaledScreen, &x, &y, &w, &h, "copyrect");

        rect.r.x = Swap16IfLE(x);
        rect.r.y = Swap16IfLE(y);
        rect.r.w = Swap16IfLE(w);
        rect.r.h = Swap16IfLE(h);
        rect.encoding = Swap32IfLE(rfbEncodingCopyRect);

        memcpy(&cl->updateBuf[cl->ublen], (char *)&rect,
               sz_rfbFramebufferUpdateRectHeader);
        cl->ublen += sz_rfbFramebufferUpdateRectHeader;

        /* CopyRect payload: the source position of the copied block. */
        cr.srcX = Swap16IfLE(x - dx);
        cr.srcY = Swap16IfLE(y - dy);

        memcpy(&cl->updateBuf[cl->ublen], (char *)&cr, sz_rfbCopyRect);
        cl->ublen += sz_rfbCopyRect;

        rfbStatRecordEncodingSent(cl, rfbEncodingCopyRect, sz_rfbFramebufferUpdateRectHeader + sz_rfbCopyRect,
                                  w * h * (cl->scaledScreen->bitsPerPixel / 8));
    }
    sraRgnReleaseIterator(i);

    return TRUE;
}
/*
 * Send a given rectangle in raw encoding (rfbEncodingRaw).
 *
 * Large rectangles are streamed in horizontal bands of as many whole lines
 * as fit into the update buffer, flushing between bands.
 */

rfbBool
rfbSendRectEncodingRaw(rfbClientPtr cl,
                       int x,
                       int y,
                       int w,
                       int h)
{
    rfbFramebufferUpdateRectHeader rect;
    int nlines;
    /* NOTE(review): for formats with bitsPerPixel < 8 this evaluates to 0
       and the divisions below would fault; presumably such formats never
       reach this path -- confirm against the supported pixel formats. */
    int bytesPerLine = w * (cl->format.bitsPerPixel / 8);
    /* Pointer to the first source pixel of the rectangle in the (possibly
       scaled) framebuffer. */
    char *fbptr = (cl->scaledScreen->frameBuffer + (cl->scaledScreen->paddedWidthInBytes * y)
                   + (x * (cl->scaledScreen->bitsPerPixel / 8)));

    if(!h || !w)
        return TRUE; /* nothing to send */

    /* Flush the buffer to guarantee correct alignment for translateFn(). */
    if (cl->ublen > 0) {
        if (!rfbSendUpdateBuf(cl))
            return FALSE;
    }

    rect.r.x = Swap16IfLE(x);
    rect.r.y = Swap16IfLE(y);
    rect.r.w = Swap16IfLE(w);
    rect.r.h = Swap16IfLE(h);
    rect.encoding = Swap32IfLE(rfbEncodingRaw);

    memcpy(&cl->updateBuf[cl->ublen], (char *)&rect,sz_rfbFramebufferUpdateRectHeader);
    cl->ublen += sz_rfbFramebufferUpdateRectHeader;

    rfbStatRecordEncodingSent(cl, rfbEncodingRaw, sz_rfbFramebufferUpdateRectHeader + bytesPerLine * h,
                              sz_rfbFramebufferUpdateRectHeader + bytesPerLine * h);

    /* How many full lines fit into the remaining buffer space. */
    nlines = (UPDATE_BUF_SIZE - cl->ublen) / bytesPerLine;

    while (TRUE) {
        if (nlines > h)
            nlines = h;

        /* Translate nlines worth of pixels from server format to the
           client's pixel format directly into the update buffer. */
        (*cl->translateFn)(cl->translateLookupTable,
                           &(cl->screen->serverFormat),
                           &cl->format, fbptr, &cl->updateBuf[cl->ublen],
                           cl->scaledScreen->paddedWidthInBytes, w, nlines);

        cl->ublen += nlines * bytesPerLine;
        h -= nlines;

        if (h == 0) /* rect fitted in buffer, do next one */
            return TRUE;

        /* buffer full - flush partial rect and do another nlines */

        if (!rfbSendUpdateBuf(cl))
            return FALSE;

        fbptr += (cl->scaledScreen->paddedWidthInBytes * nlines);

        nlines = (UPDATE_BUF_SIZE - cl->ublen) / bytesPerLine;
        if (nlines == 0) {
            /* A single line is wider than the whole update buffer: cannot
               make progress, so give up on this client. */
            rfbErr("rfbSendRectEncodingRaw: send buffer too small for %d "
                   "bytes per line\n", bytesPerLine);
            rfbCloseClient(cl);
            return FALSE;
        }
    }
}
/*
 * Append an empty rectangle whose encoding field is rfbEncodingLastRect,
 * telling the client that no further rectangles follow in this update
 * (the "LastRect" extension of the RFB protocol).
 */

rfbBool
rfbSendLastRectMarker(rfbClientPtr cl)
{
    rfbFramebufferUpdateRectHeader hdr;

    /* Make room in the update buffer first if the marker would not fit. */
    if (cl->ublen + sz_rfbFramebufferUpdateRectHeader > UPDATE_BUF_SIZE) {
        if (!rfbSendUpdateBuf(cl))
            return FALSE;
    }

    hdr.r.x = 0;
    hdr.r.y = 0;
    hdr.r.w = 0;
    hdr.r.h = 0;
    hdr.encoding = Swap32IfLE(rfbEncodingLastRect);

    memcpy(&cl->updateBuf[cl->ublen], (char *)&hdr,
           sz_rfbFramebufferUpdateRectHeader);
    cl->ublen += sz_rfbFramebufferUpdateRectHeader;

    rfbStatRecordEncodingSent(cl, rfbEncodingLastRect,
                              sz_rfbFramebufferUpdateRectHeader,
                              sz_rfbFramebufferUpdateRectHeader);

    return TRUE;
}
/*
 * Append a NewFBSize pseudo-rectangle telling the client to resize its
 * framebuffer to w x h.
 */

rfbBool
rfbSendNewFBSize(rfbClientPtr cl,
                 int w,
                 int h)
{
    rfbFramebufferUpdateRectHeader hdr;

    /* Flush first when the pseudo-rectangle would overflow the buffer. */
    if (cl->ublen + sz_rfbFramebufferUpdateRectHeader > UPDATE_BUF_SIZE) {
        if (!rfbSendUpdateBuf(cl))
            return FALSE;
    }

    if (cl->PalmVNC==TRUE)
        rfbLog("Sending rfbEncodingNewFBSize in response to a PalmVNC style framebuffer resize (%dx%d)\n", w, h);
    else
        rfbLog("Sending rfbEncodingNewFBSize for resize to (%dx%d)\n", w, h);

    hdr.encoding = Swap32IfLE(rfbEncodingNewFBSize);
    hdr.r.x = 0;
    hdr.r.y = 0;
    hdr.r.w = Swap16IfLE(w);
    hdr.r.h = Swap16IfLE(h);

    memcpy(&cl->updateBuf[cl->ublen], (char *)&hdr,
           sz_rfbFramebufferUpdateRectHeader);
    cl->ublen += sz_rfbFramebufferUpdateRectHeader;

    rfbStatRecordEncodingSent(cl, rfbEncodingNewFBSize,
                              sz_rfbFramebufferUpdateRectHeader,
                              sz_rfbFramebufferUpdateRectHeader);

    return TRUE;
}
/*
 * Send ExtDesktopSize pseudo-rectangle. This message is used:
 * - to tell the client to change its framebuffer size
 * - at the start of the session to inform the client we support size changes through setDesktopSize
 * - in response to setDesktopSize commands to indicate success or failure
 *
 * Returns TRUE on success, FALSE if the screen list cannot be obtained or
 * the message cannot be sent.
 */

rfbBool
rfbSendExtDesktopSize(rfbClientPtr cl,
                      int w,
                      int h)
{
    rfbFramebufferUpdateRectHeader rect;
    rfbExtDesktopSizeMsg edsHdr;
    rfbExtDesktopScreen eds;
    int i;
    char *logmsg;
    int numScreens = cl->screen->numberOfExtDesktopScreensHook(cl);

    /* Reject screen counts whose message can never fit into the update
       buffer: the flush below only helps when other data is pending, so
       without this check the per-screen memcpy loop would overflow
       cl->updateBuf. */
    if (numScreens < 0
        || sz_rfbFramebufferUpdateRectHeader
           + sz_rfbExtDesktopSizeMsg
           + sz_rfbExtDesktopScreen * numScreens > UPDATE_BUF_SIZE) {
        rfbErr("rfbSendExtDesktopSize: %d screens do not fit into the update buffer\n",
               numScreens);
        return FALSE;
    }

    /* Flush pending data if this pseudo-rectangle would not fit. */
    if (cl->ublen + sz_rfbFramebufferUpdateRectHeader
        + sz_rfbExtDesktopSizeMsg
        + sz_rfbExtDesktopScreen * numScreens > UPDATE_BUF_SIZE) {
        if (!rfbSendUpdateBuf(cl))
            return FALSE;
    }

    rect.encoding = Swap32IfLE(rfbEncodingExtDesktopSize);
    rect.r.w = Swap16IfLE(w);
    rect.r.h = Swap16IfLE(h);
    /* Per the ExtendedDesktopSize spec, x carries who requested the change
       and y carries the result code of the last change attempt. */
    rect.r.x = Swap16IfLE(cl->requestedDesktopSizeChange);
    rect.r.y = Swap16IfLE(cl->lastDesktopSizeChangeError);

    logmsg = "";
    if (cl->requestedDesktopSizeChange == rfbExtDesktopSize_ClientRequestedChange)
    {
        /* our client requested the resize through setDesktopSize */
        switch (cl->lastDesktopSizeChangeError)
        {
        case rfbExtDesktopSize_Success:
            logmsg = "resize successful";
            break;
        case rfbExtDesktopSize_ResizeProhibited:
            logmsg = "resize prohibited";
            break;
        case rfbExtDesktopSize_OutOfResources:
            logmsg = "resize failed: out of resources";
            break;
        case rfbExtDesktopSize_InvalidScreenLayout:
            logmsg = "resize failed: invalid screen layout";
            break;
        default:
            break;
        }
    }

    /* The request/result fields are one-shot; clear them after reporting. */
    cl->requestedDesktopSizeChange = 0;
    cl->lastDesktopSizeChangeError = 0;

    rfbLog("Sending rfbEncodingExtDesktopSize for size (%dx%d) %s\n", w, h, logmsg);
    memcpy(&cl->updateBuf[cl->ublen], (char *)&rect,
           sz_rfbFramebufferUpdateRectHeader);
    cl->ublen += sz_rfbFramebufferUpdateRectHeader;

    /* numberOfScreens appears to be a single byte on the wire (no byte
       swap applied) -- confirm against rfbproto.h. */
    edsHdr.numberOfScreens = numScreens;
    edsHdr.pad[0] = edsHdr.pad[1] = edsHdr.pad[2] = 0;
    memcpy(&cl->updateBuf[cl->ublen], (char *)&edsHdr,
           sz_rfbExtDesktopSizeMsg);
    cl->ublen += sz_rfbExtDesktopSizeMsg;

    for (i=0; i<numScreens; i++) {
        if (!cl->screen->getExtDesktopScreenHook(i, &eds, cl))
        {
            rfbErr("Error getting ExtendedDesktopSize information for screen #%d\n", i);
            return FALSE;
        }
        eds.id = Swap32IfLE(eds.id);
        eds.x = Swap16IfLE(eds.x);
        eds.y = Swap16IfLE(eds.y);
        eds.width = Swap16IfLE(eds.width);
        eds.height = Swap16IfLE(eds.height);
        eds.flags = Swap32IfLE(eds.flags);

        memcpy(&cl->updateBuf[cl->ublen], (char *)&eds,
               sz_rfbExtDesktopScreen);
        cl->ublen += sz_rfbExtDesktopScreen;
    }

    rfbStatRecordEncodingSent(cl, rfbEncodingExtDesktopSize,
                              sz_rfbFramebufferUpdateRectHeader + sz_rfbExtDesktopSizeMsg + sz_rfbExtDesktopScreen * numScreens,
                              sz_rfbFramebufferUpdateRectHeader + sz_rfbExtDesktopSizeMsg + sz_rfbExtDesktopScreen * numScreens);

    return TRUE;
}
/*
 * Flush the contents of cl->updateBuf to the client.  Returns TRUE on
 * success; on failure the client is closed and FALSE is returned.
 */

rfbBool
rfbSendUpdateBuf(rfbClientPtr cl)
{
    rfbBool ok = (cl->sock >= 0);

    if (ok && rfbWriteExact(cl, cl->updateBuf, cl->ublen) < 0) {
        rfbLogPerror("rfbSendUpdateBuf: write");
        rfbCloseClient(cl);
        ok = FALSE;
    }

    if (ok)
        cl->ublen = 0;  /* buffer fully drained */

    return ok;
}
/*
 * rfbSendSetColourMapEntries sends a SetColourMapEntries message to the
 * client, using values from the currently installed colormap.
 *
 * Returns TRUE on success, FALSE on allocation or write failure (the
 * client is closed on write failure).
 */

rfbBool
rfbSendSetColourMapEntries(rfbClientPtr cl,
                           int firstColour,
                           int nColours)
{
    char buf[sz_rfbSetColourMapEntriesMsg + 256 * 3 * 2];
    char *wbuf = buf;
    rfbSetColourMapEntriesMsg *scme;
    uint16_t *rgb;
    rfbColourMap* cm = &cl->screen->colourMap;
    int i, len;

    if (nColours > 256) {
        /* some rare hardware has, e.g., 4096 colors cells: PseudoColor:12 */
        wbuf = (char *) malloc(sz_rfbSetColourMapEntriesMsg + nColours * 3 * 2);
        if (wbuf == NULL)
            return FALSE;  /* avoid NULL dereference below on OOM */
    }

    scme = (rfbSetColourMapEntriesMsg *)wbuf;
    rgb = (uint16_t *)(&wbuf[sz_rfbSetColourMapEntriesMsg]);

    scme->type = rfbSetColourMapEntries;

    scme->firstColour = Swap16IfLE(firstColour);
    scme->nColours = Swap16IfLE(nColours);

    len = sz_rfbSetColourMapEntriesMsg;

    for (i = 0; i < nColours; i++) {
        if(i<(int)cm->count) {
            if(cm->is16) {
                rgb[i*3] = Swap16IfLE(cm->data.shorts[i*3]);
                rgb[i*3+1] = Swap16IfLE(cm->data.shorts[i*3+1]);
                rgb[i*3+2] = Swap16IfLE(cm->data.shorts[i*3+2]);
            } else {
                rgb[i*3] = Swap16IfLE((unsigned short)cm->data.bytes[i*3]);
                rgb[i*3+1] = Swap16IfLE((unsigned short)cm->data.bytes[i*3+1]);
                rgb[i*3+2] = Swap16IfLE((unsigned short)cm->data.bytes[i*3+2]);
            }
        } else {
            /* Entries beyond the installed colormap were previously sent as
               uninitialized memory; send black instead. */
            rgb[i*3] = 0;
            rgb[i*3+1] = 0;
            rgb[i*3+2] = 0;
        }
    }

    len += nColours * 3 * 2;

    LOCK(cl->sendMutex);
    if (rfbWriteExact(cl, wbuf, len) < 0) {
        rfbLogPerror("rfbSendSetColourMapEntries: write");
        rfbCloseClient(cl);
        if (wbuf != buf) free(wbuf);
        UNLOCK(cl->sendMutex);
        return FALSE;
    }
    UNLOCK(cl->sendMutex);

    rfbStatRecordMessageSent(cl, rfbSetColourMapEntries, len, len);
    if (wbuf != buf) free(wbuf);
    return TRUE;
}
/*
 * rfbSendBell sends a Bell message to all the clients.
 */

void
rfbSendBell(rfbScreenInfoPtr rfbScreen)
{
    rfbClientIteratorPtr i;
    rfbClientPtr cl;
    rfbBellMsg b;

    i = rfbGetClientIterator(rfbScreen);
    while((cl=rfbClientIteratorNext(i))) {
        b.type = rfbBell;
        LOCK(cl->sendMutex);
        if (rfbWriteExact(cl, (char *)&b, sz_rfbBellMsg) < 0) {
            rfbLogPerror("rfbSendBell: write");
            rfbCloseClient(cl);
        }
        UNLOCK(cl->sendMutex);
        /* Record the stat per client, inside the loop: after the loop ends
           cl is always NULL, so the previous placement dereferenced a NULL
           pointer inside rfbStatRecordMessageSent. */
        rfbStatRecordMessageSent(cl, rfbBell, sz_rfbBellMsg, sz_rfbBellMsg);
    }
    rfbReleaseClientIterator(i);
}
/*
 * rfbSendServerCutText sends a ServerCutText message to all the clients.
 */

void
rfbSendServerCutText(rfbScreenInfoPtr rfbScreen,char *str, int len)
{
    rfbClientPtr cl;
    rfbServerCutTextMsg sct;
    rfbClientIteratorPtr iterator;

    /* Guard against NULL text or a negative length: rfbWriteExact(cl, str,
       len) below would otherwise read through a NULL pointer or send a
       bogus length on the wire.  len == 0 with a valid pointer is fine
       (empty clipboard). */
    if (str == NULL || len < 0)
        return;

    memset((char *)&sct, 0, sizeof(sct));

    iterator = rfbGetClientIterator(rfbScreen);
    while ((cl = rfbClientIteratorNext(iterator)) != NULL) {
        sct.type = rfbServerCutText;
        sct.length = Swap32IfLE(len);
        LOCK(cl->sendMutex);
        if (rfbWriteExact(cl, (char *)&sct,
                          sz_rfbServerCutTextMsg) < 0) {
            rfbLogPerror("rfbSendServerCutText: write");
            rfbCloseClient(cl);
            UNLOCK(cl->sendMutex);
            continue;
        }
        if (rfbWriteExact(cl, str, len) < 0) {
            rfbLogPerror("rfbSendServerCutText: write");
            rfbCloseClient(cl);
        }
        UNLOCK(cl->sendMutex);
        rfbStatRecordMessageSent(cl, rfbServerCutText, sz_rfbServerCutTextMsg+len, sz_rfbServerCutTextMsg+len);
    }
    rfbReleaseClientIterator(iterator);
}
/*****************************************************************************
 *
 * UDP can be used for keyboard and pointer events when the underlying
 * network is highly reliable. This is really here to support ORL's
 * videotile, whose TCP implementation doesn't like sending lots of small
 * packets (such as 100s of pen readings per second!).
 */

static unsigned char ptrAcceleration = 50;

void
rfbNewUDPConnection(rfbScreenInfoPtr rfbScreen,
                    rfbSocket sock)
{
    /* Greet the new UDP peer with the single-byte acceleration value. */
    const int written = write(sock, (char *)&ptrAcceleration, 1);

    if (written < 0) {
        rfbLogPerror("rfbNewUDPConnection: write");
    }
}
/*
 * Because UDP is a message based service, we can't read the first byte and
 * then the rest of the packet separately like we do with TCP. We will always
 * get a whole packet delivered in one go, so we ask read() for the maximum
 * number of bytes we can possibly get.
 */

void
rfbProcessUDPInput(rfbScreenInfoPtr rfbScreen)
{
    int n;
    rfbClientPtr cl = rfbScreen->udpClient;
    rfbClientToServerMsg msg;

    if (cl == NULL || cl->onHold)
        return;

    n = read(rfbScreen->udpSock, (char *)&msg, sizeof(msg));
    if (n <= 0) {
        if (n < 0)
            rfbLogPerror("rfbProcessUDPInput: read");
        rfbDisconnectUDPSock(rfbScreen);
        return;
    }

    if (msg.type == rfbKeyEvent) {
        /* A UDP datagram must contain exactly one whole message. */
        if (n != sz_rfbKeyEventMsg) {
            rfbErr("rfbProcessUDPInput: key event incorrect length\n");
            rfbDisconnectUDPSock(rfbScreen);
            return;
        }
        cl->screen->kbdAddEvent(msg.ke.down, (rfbKeySym)Swap32IfLE(msg.ke.key), cl);
    } else if (msg.type == rfbPointerEvent) {
        if (n != sz_rfbPointerEventMsg) {
            rfbErr("rfbProcessUDPInput: ptr event incorrect length\n");
            rfbDisconnectUDPSock(rfbScreen);
            return;
        }
        cl->screen->ptrAddEvent(msg.pe.buttonMask,
                                Swap16IfLE(msg.pe.x), Swap16IfLE(msg.pe.y), cl);
    } else {
        rfbErr("rfbProcessUDPInput: unknown message type %d\n",
               msg.type);
        rfbDisconnectUDPSock(rfbScreen);
    }
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_4058_1 |
crossvul-cpp_data_bad_445_0 | /*
* uriparser - RFC 3986 URI parsing library
*
* Copyright (C) 2007, Weijia Song <songweijia@gmail.com>
* Copyright (C) 2007, Sebastian Pipping <sebastian@pipping.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* * Neither the name of the <ORGANIZATION> nor the names of its
* contributors may be used to endorse or promote products
* derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* What encodings are enabled? */
#include <uriparser/UriDefsConfig.h>
#if (!defined(URI_PASS_ANSI) && !defined(URI_PASS_UNICODE))
/* Include SELF twice */
# ifdef URI_ENABLE_ANSI
# define URI_PASS_ANSI 1
# include "UriCommon.c"
# undef URI_PASS_ANSI
# endif
# ifdef URI_ENABLE_UNICODE
# define URI_PASS_UNICODE 1
# include "UriCommon.c"
# undef URI_PASS_UNICODE
# endif
#else
# ifdef URI_PASS_ANSI
# include <uriparser/UriDefsAnsi.h>
# else
# include <uriparser/UriDefsUnicode.h>
# include <wchar.h>
# endif
#ifndef URI_DOXYGEN
# include <uriparser/Uri.h>
# include "UriCommon.h"
#endif
/* A non-NULL, harmless address that empty text ranges can point at. */
/*extern*/ const URI_CHAR * const URI_FUNC(SafeToPointTo) = _UT("X");
/* The "." (current directory) path segment. */
/*extern*/ const URI_CHAR * const URI_FUNC(ConstPwd) = _UT(".");
/* The ".." (parent directory) path segment. */
/*extern*/ const URI_CHAR * const URI_FUNC(ConstParent) = _UT("..");
/* Zero-initialize every field of the given URI structure. */
void URI_FUNC(ResetUri)(URI_TYPE(Uri) * uri) {
	memset(uri, 0, sizeof(*uri));
}
/* Compares two text ranges for equal text content.
 * Returns -1, 0 or 1 (strcmp-style); two NULL ranges (or two NULL range
 * starts) compare equal, a single NULL sorts before a non-NULL one. */
int URI_FUNC(CompareRange)(
		const URI_TYPE(TextRange) * a,
		const URI_TYPE(TextRange) * b) {
	int lenA;
	int lenB;
	int cmp;

	/* NOTE: Both NULL means equal! */
	if ((a == NULL) || (b == NULL)) {
		return ((a == NULL) ? 0 : 1) - ((b == NULL) ? 0 : 1);
	}

	/* NOTE: Both NULL means equal! */
	if ((a->first == NULL) || (b->first == NULL)) {
		return ((a->first == NULL) ? 0 : 1) - ((b->first == NULL) ? 0 : 1);
	}

	/* Shorter range sorts first; only equal lengths need a content scan. */
	lenA = (int)(a->afterLast - a->first);
	lenB = (int)(b->afterLast - b->first);
	if (lenA != lenB) {
		return (lenA < lenB) ? -1 : 1;
	}

	cmp = URI_STRNCMP(a->first, b->first, (a->afterLast - a->first));
	if (cmp != 0) {
		return (cmp < 0) ? -1 : 1;
	}
	return 0;
}
/* Properly removes "." and ".." path segments.
 * Convenience wrapper around RemoveDotSegmentsEx that takes the path
 * ownership flag from the URI itself; a NULL URI is trivially a success. */
UriBool URI_FUNC(RemoveDotSegments)(URI_TYPE(Uri) * uri,
		UriBool relative, UriMemoryManager * memory) {
	return (uri == NULL)
			? URI_TRUE
			: URI_FUNC(RemoveDotSegmentsEx)(uri, relative, uri->owner, memory);
}
/* Removes "." and ".." path segments in place, following the spirit of
 * RFC 3986 section 5.2.4 ("Remove Dot Segments").
 *
 * uri       - URI whose path list is rewritten; NULL or empty path is a no-op
 * relative  - URI_TRUE if the path is relative; a leading "." may then be
 *             essential (it protects a first segment containing ':' from
 *             being misread as a scheme) and a leading ".." is kept
 * pathOwned - URI_TRUE if segment text is heap-owned and must be freed here
 * memory    - allocator used to free segments and allocate "" segments
 *
 * Returns URI_FALSE only on allocation failure.
 *
 * Implementation note: each segment's `reserved` field is temporarily used
 * as a back ("prev") pointer, turning the singly-linked path list into a
 * doubly-linked one for the duration of the walk. */
UriBool URI_FUNC(RemoveDotSegmentsEx)(URI_TYPE(Uri) * uri,
		UriBool relative, UriBool pathOwned, UriMemoryManager * memory) {
	URI_TYPE(PathSegment) * walker;
	if ((uri == NULL) || (uri->pathHead == NULL)) {
		return URI_TRUE;
	}

	walker = uri->pathHead;
	walker->reserved = NULL; /* Prev pointer */
	do {
		UriBool removeSegment = URI_FALSE;
		int len = (int)(walker->text.afterLast - walker->text.first);
		switch (len) {

		case 1:
			if ((walker->text.first)[0] == _UT('.')) {
				/* "." segment -> remove if not essential */
				URI_TYPE(PathSegment) * const prev = walker->reserved;
				URI_TYPE(PathSegment) * const nextBackup = walker->next;

				/* Is this dot segment essential?
				 * A leading "." of a relative path is essential when the
				 * following segment contains ':', which would otherwise be
				 * parsed as a scheme separator. */
				removeSegment = URI_TRUE;
				if (relative && (walker == uri->pathHead) && (walker->next != NULL)) {
					const URI_CHAR * ch = walker->next->text.first;
					for (; ch < walker->next->text.afterLast; ch++) {
						if (*ch == _UT(':')) {
							removeSegment = URI_FALSE;
							break;
						}
					}
				}

				if (removeSegment) {
					/* Last segment? */
					if (walker->next != NULL) {
						/* Not last segment */
						walker->next->reserved = prev;

						if (prev == NULL) {
							/* First but not last segment */
							uri->pathHead = walker->next;
						} else {
							/* Middle segment */
							prev->next = walker->next;
						}

						if (pathOwned && (walker->text.first != walker->text.afterLast)) {
							memory->free(memory, (URI_CHAR *)walker->text.first);
						}
						memory->free(memory, walker);
					} else {
						/* Last segment */
						if (pathOwned && (walker->text.first != walker->text.afterLast)) {
							memory->free(memory, (URI_CHAR *)walker->text.first);
						}

						if (prev == NULL) {
							/* Last and first */
							if (URI_FUNC(IsHostSet)(uri)) {
								/* Replace "." with empty segment to represent trailing slash */
								walker->text.first = URI_FUNC(SafeToPointTo);
								walker->text.afterLast = URI_FUNC(SafeToPointTo);
							} else {
								memory->free(memory, walker);

								uri->pathHead = NULL;
								uri->pathTail = NULL;
							}
						} else {
							/* Last but not first, replace "." with empty segment to represent trailing slash */
							walker->text.first = URI_FUNC(SafeToPointTo);
							walker->text.afterLast = URI_FUNC(SafeToPointTo);
						}
					}

					walker = nextBackup;
				}
			}
			break;

		case 2:
			if (((walker->text.first)[0] == _UT('.'))
					&& ((walker->text.first)[1] == _UT('.'))) {
				/* Path ".." -> remove this and the previous segment */
				URI_TYPE(PathSegment) * const prev = walker->reserved;
				URI_TYPE(PathSegment) * prevPrev;
				URI_TYPE(PathSegment) * const nextBackup = walker->next;

				/* For relative paths a ".." with no previous segment, or one
				 * following another "..", must be preserved */
				removeSegment = URI_TRUE;
				if (relative) {
					if (prev == NULL) {
						removeSegment = URI_FALSE;
					} else if ((prev != NULL)
							&& ((prev->text.afterLast - prev->text.first) == 2)
							&& ((prev->text.first)[0] == _UT('.'))
							&& ((prev->text.first)[1] == _UT('.'))) {
						removeSegment = URI_FALSE;
					}
				}

				if (removeSegment) {
					if (prev != NULL) {
						/* Not first segment */
						prevPrev = prev->reserved;
						if (prevPrev != NULL) {
							/* Not even prev is the first one */
							prevPrev->next = walker->next;
							if (walker->next != NULL) {
								walker->next->reserved = prevPrev;
							} else {
								/* Last segment -> insert "" segment to represent trailing slash, update tail */
								URI_TYPE(PathSegment) * const segment = memory->calloc(memory, 1, sizeof(URI_TYPE(PathSegment)));
								if (segment == NULL) {
									/* Free walker and prev before reporting the malloc error */
									if (pathOwned && (walker->text.first != walker->text.afterLast)) {
										memory->free(memory, (URI_CHAR *)walker->text.first);
									}
									memory->free(memory, walker);

									if (pathOwned && (prev->text.first != prev->text.afterLast)) {
										memory->free(memory, (URI_CHAR *)prev->text.first);
									}
									memory->free(memory, prev);

									return URI_FALSE; /* Raises malloc error */
								}
								segment->text.first = URI_FUNC(SafeToPointTo);
								segment->text.afterLast = URI_FUNC(SafeToPointTo);
								prevPrev->next = segment;
								uri->pathTail = segment;
							}

							if (pathOwned && (walker->text.first != walker->text.afterLast)) {
								memory->free(memory, (URI_CHAR *)walker->text.first);
							}
							memory->free(memory, walker);

							if (pathOwned && (prev->text.first != prev->text.afterLast)) {
								memory->free(memory, (URI_CHAR *)prev->text.first);
							}
							memory->free(memory, prev);

							walker = nextBackup;
						} else {
							/* Prev is the first segment */
							if (walker->next != NULL) {
								uri->pathHead = walker->next;
								walker->next->reserved = NULL;

								if (pathOwned && (walker->text.first != walker->text.afterLast)) {
									memory->free(memory, (URI_CHAR *)walker->text.first);
								}
								memory->free(memory, walker);
							} else {
								/* Re-use segment for "" path segment to represent trailing slash, update tail */
								URI_TYPE(PathSegment) * const segment = walker;
								if (pathOwned && (segment->text.first != segment->text.afterLast)) {
									memory->free(memory, (URI_CHAR *)segment->text.first);
								}
								segment->text.first = URI_FUNC(SafeToPointTo);
								segment->text.afterLast = URI_FUNC(SafeToPointTo);
								uri->pathHead = segment;
								uri->pathTail = segment;
							}

							if (pathOwned && (prev->text.first != prev->text.afterLast)) {
								memory->free(memory, (URI_CHAR *)prev->text.first);
							}
							memory->free(memory, prev);

							walker = nextBackup;
						}
					} else {
						URI_TYPE(PathSegment) * const anotherNextBackup = walker->next;
						/* First segment -> update head pointer */
						uri->pathHead = walker->next;
						if (walker->next != NULL) {
							walker->next->reserved = NULL;
						} else {
							/* Last segment -> update tail */
							uri->pathTail = NULL;
						}

						if (pathOwned && (walker->text.first != walker->text.afterLast)) {
							memory->free(memory, (URI_CHAR *)walker->text.first);
						}
						memory->free(memory, walker);

						walker = anotherNextBackup;
					}
				}
			}
			break;

		}

		/* Nothing removed this round: advance and maintain prev pointers */
		if (!removeSegment) {
			if (walker->next != NULL) {
				walker->next->reserved = walker;
			} else {
				/* Last segment -> update tail */
				uri->pathTail = walker;
			}
			walker = walker->next;
		}
	} while (walker != NULL);

	return URI_TRUE;
}
/* Convenience wrapper around RemoveDotSegments that treats the path
 * as absolute (i.e. passes relative == URI_FALSE). */
UriBool URI_FUNC(RemoveDotSegmentsAbsolute)(URI_TYPE(Uri) * uri,
		UriMemoryManager * memory) {
	return URI_FUNC(RemoveDotSegments)(uri, URI_FALSE, memory);
}
/* Maps a hexadecimal digit character (0-9, a-f, A-F) to its numeric
 * value 0..15; any other character maps to 0. */
unsigned char URI_FUNC(HexdigToInt)(URI_CHAR hexdig) {
	switch (hexdig) {
	case _UT('0'): case _UT('1'): case _UT('2'): case _UT('3'):
	case _UT('4'): case _UT('5'): case _UT('6'): case _UT('7'):
	case _UT('8'): case _UT('9'):
		return (unsigned char)(hexdig - _UT('0'));

	case _UT('a'): case _UT('b'): case _UT('c'):
	case _UT('d'): case _UT('e'): case _UT('f'):
		return (unsigned char)(10 + (hexdig - _UT('a')));

	case _UT('A'): case _UT('B'): case _UT('C'):
	case _UT('D'): case _UT('E'): case _UT('F'):
		return (unsigned char)(10 + (hexdig - _UT('A')));

	default:
		return 0;
	}
}
URI_CHAR URI_FUNC(HexToLetter)(unsigned int value) {
/* Uppercase recommended in section 2.1. of RFC 3986 *
* http://tools.ietf.org/html/rfc3986#section-2.1 */
return URI_FUNC(HexToLetterEx)(value, URI_TRUE);
}
/* Maps a value 0..15 to its hex digit character in the requested
 * case. Values above 15 clamp to 'F'/'f', matching the behavior of
 * the original switch default. */
URI_CHAR URI_FUNC(HexToLetterEx)(unsigned int value, UriBool uppercase) {
	static const URI_CHAR upperDigits[] = _UT("0123456789ABCDEF");
	static const URI_CHAR lowerDigits[] = _UT("0123456789abcdef");
	const URI_CHAR * const digits =
			(uppercase == URI_TRUE) ? upperDigits : lowerDigits;
	return digits[(value <= 15) ? value : 15];
}
/* Reports whether any host representation (text, IPv4, IPv6 or
 * IPvFuture) is present in the given URI. NULL-safe. */
UriBool URI_FUNC(IsHostSet)(const URI_TYPE(Uri) * uri) {
	if (uri == NULL) {
		return URI_FALSE;
	}
	return ((uri->hostText.first != NULL)
			|| (uri->hostData.ip4 != NULL)
			|| (uri->hostData.ip6 != NULL)
			|| (uri->hostData.ipFuture.first != NULL))
			? URI_TRUE : URI_FALSE;
}
/* Copies the path segment list from source to dest. The copy is
 * shallow: the text ranges still point into source's storage (dest
 * is known by its callers not to own its text).
 * Returns URI_FALSE on allocation failure, leaving any partially
 * built list properly terminated. */
UriBool URI_FUNC(CopyPath)(URI_TYPE(Uri) * dest,
		const URI_TYPE(Uri) * source, UriMemoryManager * memory) {
	URI_TYPE(PathSegment) * input = source->pathHead;
	URI_TYPE(PathSegment) * tail = NULL;

	if (input == NULL) {
		/* Source has no path component */
		dest->pathHead = NULL;
		dest->pathTail = NULL;
	} else {
		while (input != NULL) {
			URI_TYPE(PathSegment) * const copy =
					memory->malloc(memory, sizeof(URI_TYPE(PathSegment)));
			if (copy == NULL) {
				/* Terminate what was built so far before bailing out */
				if (tail != NULL) {
					tail->next = NULL;
				}
				return URI_FALSE; /* Raises malloc error */
			}
			/* Shallow text copy, see note above */
			copy->text = input->text;
			if (tail == NULL) {
				dest->pathHead = copy;
			} else {
				tail->next = copy;
			}
			tail = copy;
			input = input->next;
		}
		dest->pathTail = tail;
		dest->pathTail->next = NULL;
	}

	dest->absolutePath = source->absolutePath;
	return URI_TRUE;
}
/* Copies userInfo, host and port from source to dest. Text ranges
 * are copied shallowly (dest is known not to own its text); IPv4 and
 * IPv6 address structs are deep-copied.
 * Returns URI_FALSE on allocation failure. */
UriBool URI_FUNC(CopyAuthority)(URI_TYPE(Uri) * dest,
		const URI_TYPE(Uri) * source, UriMemoryManager * memory) {
	dest->userInfo = source->userInfo;
	dest->hostText = source->hostText;

	if (source->hostData.ip4 != NULL) {
		/* Deep-copy the IPv4 struct */
		UriIp4 * const ip4Copy = memory->malloc(memory, sizeof(UriIp4));
		dest->hostData.ip4 = ip4Copy;
		if (ip4Copy == NULL) {
			return URI_FALSE; /* Raises malloc error */
		}
		*ip4Copy = *(source->hostData.ip4);
		dest->hostData.ip6 = NULL;
		dest->hostData.ipFuture.first = NULL;
		dest->hostData.ipFuture.afterLast = NULL;
	} else if (source->hostData.ip6 != NULL) {
		/* Deep-copy the IPv6 struct */
		UriIp6 * const ip6Copy = memory->malloc(memory, sizeof(UriIp6));
		dest->hostData.ip4 = NULL;
		dest->hostData.ip6 = ip6Copy;
		if (ip6Copy == NULL) {
			return URI_FALSE; /* Raises malloc error */
		}
		*ip6Copy = *(source->hostData.ip6);
		dest->hostData.ipFuture.first = NULL;
		dest->hostData.ipFuture.afterLast = NULL;
	} else {
		/* Plain text host / IPvFuture: shallow copy */
		dest->hostData.ip4 = NULL;
		dest->hostData.ip6 = NULL;
		dest->hostData.ipFuture = source->hostData.ipFuture;
	}

	dest->portText = source->portText;
	return URI_TRUE;
}
/* Prepends a "." segment where the path would otherwise serialize
 * ambiguously: (1) an absolute path whose first segment is empty
 * (would print as "//...", looking like an authority), or (2) a
 * relative path whose first two segments are empty.
 * Returns URI_FALSE on allocation failure. */
UriBool URI_FUNC(FixAmbiguity)(URI_TYPE(Uri) * uri,
		UriMemoryManager * memory) {
	URI_TYPE(PathSegment) * dotSegment;
	UriBool ambiguous = URI_FALSE;

	if (uri->pathHead != NULL) {
		const UriBool headEmpty =
				(uri->pathHead->text.afterLast == uri->pathHead->text.first);
		if (uri->absolutePath) {
			/* Case 1: absolute path, empty first segment */
			ambiguous = headEmpty;
		} else if (headEmpty && (uri->pathHead->next != NULL)
				&& (uri->pathHead->next->text.afterLast
						== uri->pathHead->next->text.first)) {
			/* Case 2: relative path, empty first and second segment */
			ambiguous = URI_TRUE;
		}
	}
	if (!ambiguous) {
		return URI_TRUE;
	}

	dotSegment = memory->malloc(memory, 1 * sizeof(URI_TYPE(PathSegment)));
	if (dotSegment == NULL) {
		return URI_FALSE; /* Raises malloc error */
	}

	/* Insert "." segment in front */
	dotSegment->next = uri->pathHead;
	dotSegment->text.first = URI_FUNC(ConstPwd);
	dotSegment->text.afterLast = URI_FUNC(ConstPwd) + 1;
	uri->pathHead = dotSegment;
	return URI_TRUE;
}
/* Drops a path consisting of a single empty segment when the URI is
 * neither absolute-path nor host-backed; such a path carries no
 * information. The segment node itself is freed. */
void URI_FUNC(FixEmptyTrailSegment)(URI_TYPE(Uri) * uri,
		UriMemoryManager * memory) {
	URI_TYPE(PathSegment) * const head = uri->pathHead;

	if (uri->absolutePath || URI_FUNC(IsHostSet)(uri)) {
		return;
	}
	if ((head == NULL) || (head->next != NULL)
			|| (head->text.first != head->text.afterLast)) {
		return; /* not exactly one empty segment */
	}

	memory->free(memory, head);
	uri->pathHead = NULL;
	uri->pathTail = NULL;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_445_0 |
crossvul-cpp_data_bad_176_0 | /*
** kernel.c - Kernel module
**
** See Copyright Notice in mruby.h
*/
#include <mruby.h>
#include <mruby/array.h>
#include <mruby/hash.h>
#include <mruby/class.h>
#include <mruby/proc.h>
#include <mruby/string.h>
#include <mruby/variable.h>
#include <mruby/error.h>
#include <mruby/istruct.h>
/* MRI-style method visibility / dispatch flag bits ("noex" flags).
 * In this file they are only passed as the `flag` argument of
 * mrb_obj_methods below, which currently ignores them (mruby does
 * not implement method visibility). */
typedef enum {
  NOEX_PUBLIC = 0x00,
  NOEX_NOSUPER = 0x01,
  NOEX_PRIVATE = 0x02,
  NOEX_PROTECTED = 0x04,
  NOEX_MASK = 0x06,          /* PRIVATE | PROTECTED */
  NOEX_BASIC = 0x08,
  NOEX_UNDEF = NOEX_NOSUPER,
  NOEX_MODFUNC = 0x12,
  NOEX_SUPER = 0x20,
  NOEX_VCALL = 0x40,
  NOEX_RESPONDS = 0x80
} mrb_method_flag_t;
/* Returns TRUE iff method `mid` on the class of `obj` still resolves
 * to the given C function `func` (i.e. has not been overridden). */
MRB_API mrb_bool
mrb_func_basic_p(mrb_state *mrb, mrb_value obj, mrb_sym mid, mrb_func_t func)
{
  mrb_method_t m = mrb_method_search(mrb, mrb_class(mrb, obj), mid);

  if (MRB_METHOD_UNDEF_P(m)) {
    return FALSE;
  }
  if (MRB_METHOD_FUNC_P(m)) {
    /* method is stored directly as a C function pointer */
    return MRB_METHOD_FUNC(m) == func;
  }
  {
    struct RProc *proc = MRB_METHOD_PROC(m);
    /* otherwise it may be a proc wrapping a C function */
    return (MRB_PROC_CFUNC_P(proc) && (MRB_PROC_CFUNC(proc) == func))
      ? TRUE : FALSE;
  }
}
/* TRUE iff `obj` still uses the default Object#to_s implementation. */
static mrb_bool
mrb_obj_basic_to_s_p(mrb_state *mrb, mrb_value obj)
{
  const mrb_sym to_s_sym = mrb_intern_lit(mrb, "to_s");
  return mrb_func_basic_p(mrb, obj, to_s_sym, mrb_any_to_s);
}
/* 15.3.1.3.17 */
/*
 *  call-seq:
 *     obj.inspect   -> string
 *
 *  Returns a human-readable string representation of <i>obj</i>.
 *  Plain objects that still use the default #to_s get their instance
 *  variables included; everything else falls back to mrb_any_to_s.
 */
MRB_API mrb_value
mrb_obj_inspect(mrb_state *mrb, mrb_value obj)
{
  const mrb_bool plain_object = (mrb_type(obj) == MRB_TT_OBJECT);

  if (plain_object && mrb_obj_basic_to_s_p(mrb, obj)) {
    return mrb_obj_iv_inspect(mrb, mrb_obj_ptr(obj));
  }
  return mrb_any_to_s(mrb, obj);
}
/* 15.3.1.3.2 */
/*
 *  call-seq:
 *     obj === other   -> true or false
 *
 *  Case equality: for Object this simply delegates to #==;
 *  subclasses override it to give `case` statements meaningful
 *  semantics.
 */
static mrb_value
mrb_equal_m(mrb_state *mrb, mrb_value self)
{
  mrb_value other;

  mrb_get_args(mrb, "o", &other);
  return mrb_bool_value(mrb_equal(mrb, self, other));
}
/* 15.3.1.3.3  */
/* 15.3.1.3.33 */
/*
 *  call-seq:
 *     obj.__id__       -> fixnum
 *     obj.object_id    -> fixnum
 *
 *  Returns an integer identifier for <i>obj</i>: the same object
 *  always reports the same id and no two live objects share one.
 */
mrb_value
mrb_obj_id_m(mrb_state *mrb, mrb_value self)
{
  const mrb_int object_id = mrb_obj_id(self);
  return mrb_fixnum_value(object_id);
}
/* 15.3.1.2.2  */
/* 15.3.1.2.5  */
/* 15.3.1.3.6  */
/* 15.3.1.3.25 */
/*
 *  call-seq:
 *     block_given?   -> true or false
 *     iterator?      -> true or false
 *
 *  Returns <code>true</code> if <code>yield</code> would execute a
 *  block in the current context. The <code>iterator?</code> form
 *  is mildly deprecated.
 *
 *     def try
 *       if block_given?
 *         yield
 *       else
 *         "no block"
 *       end
 *     end
 *     try                  #=> "no block"
 *     try { "hello" }      #=> "hello"
 *     try do "hello" end   #=> "hello"
 */
static mrb_value
mrb_f_block_given_p_m(mrb_state *mrb, mrb_value self)
{
  mrb_callinfo *ci = &mrb->c->ci[-1];  /* caller's call frame */
  mrb_callinfo *cibase = mrb->c->cibase;
  mrb_value *bp;                       /* will point at the block slot */
  struct RProc *p;

  if (ci <= cibase) {
    /* toplevel does not have block */
    return mrb_false_value();
  }
  p = ci->proc;
  /* search method/class/module proc (skip plain block procs) */
  while (p) {
    if (MRB_PROC_SCOPE_P(p)) break;
    p = p->upper;
  }
  if (p == NULL) return mrb_false_value();
  /* search ci corresponding to proc */
  while (cibase < ci) {
    if (ci->proc == p) break;
    ci--;
  }
  if (ci == cibase) {
    return mrb_false_value();
  }
  else if (ci->env) {
    /* frame's locals were captured into a heap environment */
    struct REnv *e = ci->env;
    int bidx;

    /* top-level does not have block slot (always false) */
    if (e->stack == mrb->c->stbase)
      return mrb_false_value();
    /* use saved block arg position */
    bidx = MRB_ENV_BIDX(e);
    /* bidx may be useless (e.g. define_method) */
    if (bidx >= MRB_ENV_STACK_LEN(e))
      return mrb_false_value();
    bp = &e->stack[bidx];
  }
  else {
    /* locate the block slot on the VM stack after the arguments.
     * NOTE(review): this assumes ci[1].stackent is non-NULL here; if a
     * frame can reach this point with a NULL stackent, *bp below is a
     * NULL-page dereference -- confirm against upstream mruby. */
    bp = ci[1].stackent+1;
    if (ci->argc >= 0) {
      bp += ci->argc;
    }
    else {
      bp++; /* packed-args frame: block sits right after the args array */
    }
  }
  if (mrb_nil_p(*bp))
    return mrb_false_value();
  return mrb_true_value();
}
/* 15.3.1.3.7 */
/*
 *  call-seq:
 *     obj.class    -> class
 *
 *  Returns the real (non-singleton) class of <i>obj</i>. Must be
 *  called with an explicit receiver since `class` is a keyword.
 */
static mrb_value
mrb_obj_class_m(mrb_state *mrb, mrb_value self)
{
  struct RClass * const real_class = mrb_obj_class(mrb, self);
  return mrb_obj_value(real_class);
}
/* Returns obj's class or, when that class is a singleton class, a
 * fresh copy of it (method table and ivars copied, with the
 * __attached__ ivar re-pointed at obj) so a cloned object gets its
 * own singleton class rather than sharing the original's. */
static struct RClass*
mrb_singleton_class_clone(mrb_state *mrb, mrb_value obj)
{
  struct RClass *klass = mrb_basic_ptr(obj)->c;

  if (klass->tt != MRB_TT_SCLASS)
    return klass;
  else {
    /* copy singleton(unnamed) class */
    struct RClass *clone = (struct RClass*)mrb_obj_alloc(mrb, klass->tt, mrb->class_class);

    switch (mrb_type(obj)) {
    case MRB_TT_CLASS:
    case MRB_TT_SCLASS:
      break;
    default:
      /* singleton class of a non-class: clone its own (singleton) class too */
      clone->c = mrb_singleton_class_clone(mrb, mrb_obj_value(klass));
      break;
    }
    clone->super = klass->super;
    if (klass->iv) {
      mrb_iv_copy(mrb, mrb_obj_value(clone), mrb_obj_value(klass));
      mrb_obj_iv_set(mrb, (struct RObject*)clone, mrb_intern_lit(mrb, "__attached__"), obj);
    }
    if (klass->mt) {
      clone->mt = kh_copy(mt, mrb, klass->mt);
    }
    else {
      clone->mt = kh_init(mt, mrb);
    }
    clone->tt = MRB_TT_SCLASS;
    return clone;
  }
}
/* Copies class internals from `src` to `dst`: method table, superclass
 * link and instance type. When `src` has prepended modules, the chain
 * of prepended iclasses down to (and including) the origin class is
 * duplicated so `dst` owns its own copies. */
static void
copy_class(mrb_state *mrb, mrb_value dst, mrb_value src)
{
  struct RClass *dc = mrb_class_ptr(dst);
  struct RClass *sc = mrb_class_ptr(src);
  /* if the origin is not the same as the class, then the origin and
     the current class need to be copied */
  if (sc->flags & MRB_FLAG_IS_PREPENDED) {
    struct RClass *c0 = sc->super;
    struct RClass *c1 = dc;

    /* copy prepended iclasses */
    while (!(c0->flags & MRB_FLAG_IS_ORIGIN)) {
      c1->super = mrb_class_ptr(mrb_obj_dup(mrb, mrb_obj_value(c0)));
      c1 = c1->super;
      c0 = c0->super;
    }
    /* duplicate the origin class itself and mark the copy as origin */
    c1->super = mrb_class_ptr(mrb_obj_dup(mrb, mrb_obj_value(c0)));
    c1->super->flags |= MRB_FLAG_IS_ORIGIN;
  }
  if (sc->mt) {
    dc->mt = kh_copy(mt, mrb, sc->mt);
  }
  else {
    dc->mt = kh_init(mt, mrb);
  }
  dc->super = sc->super;
  MRB_SET_INSTANCE_TT(dc, MRB_INSTANCE_TT(sc));
}
/* Shallow-copies type-specific internals of `obj` into `dest`, then
 * lets the object customize the copy via its initialize_copy hook.
 * Iclasses return early and skip the hook. Shared by #clone and #dup. */
static void
init_copy(mrb_state *mrb, mrb_value dest, mrb_value obj)
{
  switch (mrb_type(obj)) {
    case MRB_TT_ICLASS:
      copy_class(mrb, dest, obj);
      return;  /* no initialize_copy for iclasses */
    case MRB_TT_CLASS:
    case MRB_TT_MODULE:
      copy_class(mrb, dest, obj);
      mrb_iv_copy(mrb, dest, obj);
      /* the copy must compute its own name lazily */
      mrb_iv_remove(mrb, dest, mrb_intern_lit(mrb, "__classname__"));
      break;
    case MRB_TT_OBJECT:
    case MRB_TT_SCLASS:
    case MRB_TT_HASH:
    case MRB_TT_DATA:
    case MRB_TT_EXCEPTION:
      mrb_iv_copy(mrb, dest, obj);
      break;
    case MRB_TT_ISTRUCT:
      mrb_istruct_copy(dest, obj);
      break;

    default:
      break;
  }
  mrb_funcall(mrb, dest, "initialize_copy", 1, obj);
}
/* 15.3.1.3.8 */
/*
 *  call-seq:
 *     obj.clone -> an_object
 *
 *  Produces a shallow copy of <i>obj</i>---the instance variables of
 *  <i>obj</i> are copied, but not the objects they reference. Copies
 *  the frozen state of <i>obj</i>. See also the discussion
 *  under <code>Object#dup</code>. Unlike #dup, #clone also copies
 *  the receiver's singleton class.
 *
 *  This method may have class-specific behavior. If so, that
 *  behavior will be documented under the #+initialize_copy+ method of
 *  the class.
 *
 *  Immediate values (true, false, nil, Symbol, Fixnum, Float) cannot
 *  be cloned, nor can singleton classes.
 */
MRB_API mrb_value
mrb_obj_clone(mrb_state *mrb, mrb_value self)
{
  struct RObject *p;
  mrb_value clone;

  if (mrb_immediate_p(self)) {
    mrb_raisef(mrb, E_TYPE_ERROR, "can't clone %S", self);
  }
  if (mrb_type(self) == MRB_TT_SCLASS) {
    mrb_raise(mrb, E_TYPE_ERROR, "can't clone singleton class");
  }
  p = (struct RObject*)mrb_obj_alloc(mrb, mrb_type(self), mrb_obj_class(mrb, self));
  /* give the clone its own copy of the receiver's singleton class */
  p->c = mrb_singleton_class_clone(mrb, self);
  mrb_field_write_barrier(mrb, (struct RBasic*)p, (struct RBasic*)p->c);
  clone = mrb_obj_value(p);
  init_copy(mrb, clone, self);
  /* carry over the frozen state.
   * NOTE(review): this copies ALL flag bits wholesale, which appears to
   * include GC color/mark bits -- verify against upstream that this
   * cannot confuse the garbage collector. */
  p->flags = mrb_obj_ptr(self)->flags;

  return clone;
}
/* 15.3.1.3.9 */
/*
 *  call-seq:
 *     obj.dup -> an_object
 *
 *  Produces a shallow copy of <i>obj</i>: instance variables are
 *  copied but not the objects they reference. Unlike #clone, #dup
 *  does not copy the singleton class or flags. Class-specific copy
 *  behavior lives in #initialize_copy. Immediate values and
 *  singleton classes cannot be duplicated.
 */
MRB_API mrb_value
mrb_obj_dup(mrb_state *mrb, mrb_value obj)
{
  struct RBasic *copy;
  mrb_value result;

  if (mrb_immediate_p(obj)) {
    mrb_raisef(mrb, E_TYPE_ERROR, "can't dup %S", obj);
  }
  if (mrb_type(obj) == MRB_TT_SCLASS) {
    mrb_raise(mrb, E_TYPE_ERROR, "can't dup singleton class");
  }
  copy = mrb_obj_alloc(mrb, mrb_type(obj), mrb_obj_class(mrb, obj));
  result = mrb_obj_value(copy);
  init_copy(mrb, result, obj);
  return result;
}
/* Extends `obj` with each module in argv: all arguments are
 * type-checked first, then extend_object/extended are invoked in
 * reverse argument order. Raises ArgumentError for an empty list. */
static mrb_value
mrb_obj_extend(mrb_state *mrb, mrb_int argc, mrb_value *argv, mrb_value obj)
{
  mrb_int i;

  if (argc == 0) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "wrong number of arguments (at least 1)");
  }
  /* validate everything before mutating anything */
  for (i = 0; i < argc; i++) {
    mrb_check_type(mrb, argv[i], MRB_TT_MODULE);
  }
  /* apply last-to-first */
  for (i = argc - 1; i >= 0; i--) {
    mrb_funcall(mrb, argv[i], "extend_object", 1, obj);
    mrb_funcall(mrb, argv[i], "extended", 1, obj);
  }
  return obj;
}
/* 15.3.1.3.13 */
/*
 *  call-seq:
 *     obj.extend(module, ...)    -> obj
 *
 *  Mixes the instance methods of each given module into <i>obj</i>'s
 *  singleton class:
 *
 *     module Mod
 *       def hello
 *         "Hello from Mod.\n"
 *       end
 *     end
 *
 *     k = Object.new
 *     k.extend(Mod)
 *     k.hello         #=> "Hello from Mod.\n"
 */
static mrb_value
mrb_obj_extend_m(mrb_state *mrb, mrb_value self)
{
  mrb_value *modules;
  mrb_int count;

  mrb_get_args(mrb, "*", &modules, &count);
  return mrb_obj_extend(mrb, count, modules, self);
}
/* Kernel#freeze: marks the receiver as frozen. Immediate values
 * (which have no object header to flag) are returned unchanged. */
static mrb_value
mrb_obj_freeze(mrb_state *mrb, mrb_value self)
{
  switch (mrb_type(self)) {
  case MRB_TT_FALSE:
  case MRB_TT_TRUE:
  case MRB_TT_FIXNUM:
  case MRB_TT_SYMBOL:
#ifndef MRB_WITHOUT_FLOAT
  case MRB_TT_FLOAT:
#endif
    /* immediates: nothing to do */
    break;
  default: {
    struct RBasic * const basic = mrb_basic_ptr(self);
    if (!MRB_FROZEN_P(basic)) {
      MRB_SET_FROZEN_FLAG(basic);
    }
    break;
  }
  }
  return self;
}
/* Kernel#frozen?: immediates always report true; heap objects report
 * the state of their frozen flag. */
static mrb_value
mrb_obj_frozen(mrb_state *mrb, mrb_value self)
{
  mrb_bool frozen = TRUE;

  switch (mrb_type(self)) {
  case MRB_TT_FALSE:
  case MRB_TT_TRUE:
  case MRB_TT_FIXNUM:
  case MRB_TT_SYMBOL:
#ifndef MRB_WITHOUT_FLOAT
  case MRB_TT_FLOAT:
#endif
    break; /* immediates are frozen by nature */
  default:
    frozen = MRB_FROZEN_P(mrb_basic_ptr(self)) ? TRUE : FALSE;
    break;
  }
  return mrb_bool_value(frozen);
}
/* 15.3.1.3.15 */
/*
 *  call-seq:
 *     obj.hash    -> fixnum
 *
 *  Default hash for Object: the object id, which guarantees that
 *  a.eql?(b) implies a.hash == b.hash.
 */
MRB_API mrb_value
mrb_obj_hash(mrb_state *mrb, mrb_value self)
{
  const mrb_int hash_value = mrb_obj_id(self);
  return mrb_fixnum_value(hash_value);
}
/* 15.3.1.3.16 */
/* Default initialize_copy: validates that the original has the same
 * internal type and class as the receiver; raises TypeError when not. */
static mrb_value
mrb_obj_init_copy(mrb_state *mrb, mrb_value self)
{
  mrb_value orig;

  mrb_get_args(mrb, "o", &orig);
  if (mrb_obj_equal(mrb, self, orig)) {
    return self; /* copying onto itself is trivially fine */
  }
  if (mrb_type(self) != mrb_type(orig)
      || mrb_obj_class(mrb, self) != mrb_obj_class(mrb, orig)) {
    mrb_raise(mrb, E_TYPE_ERROR, "initialize_copy should take same class object");
  }
  return self;
}
/* Returns TRUE iff `c` is exactly the (real) class of `obj`. */
MRB_API mrb_bool
mrb_obj_is_instance_of(mrb_state *mrb, mrb_value obj, struct RClass* c)
{
  return (mrb_obj_class(mrb, obj) == c) ? TRUE : FALSE;
}
/* 15.3.1.3.19 */
/*
 *  call-seq:
 *     obj.instance_of?(class)    -> true or false
 *
 *  True iff <i>class</i> is exactly the class of <i>obj</i>
 *  (no subclass or module matching; see Object#kind_of?).
 */
static mrb_value
obj_is_instance_of(mrb_state *mrb, mrb_value self)
{
  mrb_value klass;

  mrb_get_args(mrb, "C", &klass);
  return mrb_bool_value(mrb_obj_is_instance_of(mrb, self, mrb_class_ptr(klass)));
}
/* 15.3.1.3.20 */
/*
 *  call-seq:
 *     obj.instance_variable_defined?(symbol)    -> true or false
 *
 *  True iff the named instance variable is set on <i>obj</i>.
 *  Raises NameError when the name is not a valid ivar name.
 */
static mrb_value
mrb_obj_ivar_defined(mrb_state *mrb, mrb_value self)
{
  mrb_sym iv_name;

  mrb_get_args(mrb, "n", &iv_name);
  mrb_iv_check(mrb, iv_name); /* must look like @foo */
  return mrb_bool_value(mrb_iv_defined(mrb, self, iv_name));
}
/* 15.3.1.3.21 */
/*
 *  call-seq:
 *     obj.instance_variable_get(symbol)    -> obj
 *
 *  Returns the value of the named instance variable, or nil when it
 *  is not set. Raises NameError for names that are not valid
 *  instance variable names.
 */
static mrb_value
mrb_obj_ivar_get(mrb_state *mrb, mrb_value self)
{
  mrb_sym iv_name;

  mrb_get_args(mrb, "n", &iv_name);
  mrb_iv_check(mrb, iv_name); /* must look like @foo */
  return mrb_iv_get(mrb, self, iv_name);
}
/* 15.3.1.3.22 */
/*
 *  call-seq:
 *     obj.instance_variable_set(symbol, obj)    -> obj
 *
 *  Sets the named instance variable to the given value, creating it
 *  if necessary, and returns the value. Raises NameError for names
 *  that are not valid instance variable names.
 */
static mrb_value
mrb_obj_ivar_set(mrb_state *mrb, mrb_value self)
{
  mrb_sym iv_name;
  mrb_value value;

  mrb_get_args(mrb, "no", &iv_name, &value);
  mrb_iv_check(mrb, iv_name); /* must look like @foo */
  mrb_iv_set(mrb, self, iv_name, value);
  return value;
}
/* 15.3.1.3.24 */
/* 15.3.1.3.26 */
/*
 *  call-seq:
 *     obj.is_a?(class)       -> true or false
 *     obj.kind_of?(class)    -> true or false
 *
 *  True iff <i>class</i> is the class of <i>obj</i>, one of its
 *  superclasses, or a module included in <i>obj</i>'s ancestry.
 */
static mrb_value
mrb_obj_is_kind_of_m(mrb_state *mrb, mrb_value self)
{
  mrb_value klass;

  mrb_get_args(mrb, "C", &klass);
  return mrb_bool_value(mrb_obj_is_kind_of(mrb, self, mrb_class_ptr(klass)));
}
/* Symbol set: a khash from mrb_sym to a dummy char payload, used by
 * the reflection helpers below to deduplicate method names while
 * walking class chains. */
KHASH_DECLARE(st, mrb_sym, char, FALSE)
KHASH_DEFINE(st, mrb_sym, char, FALSE, kh_int_hash_func, kh_int_hash_equal)
/* Adds every defined (non-undef) method symbol of `klass` to `set`. */
static void
method_entry_loop(mrb_state *mrb, struct RClass* klass, khash_t(st)* set)
{
  khash_t(mt) * const table = klass->mt;
  khint_t idx;

  if (table == NULL || kh_size(table) == 0) {
    return;
  }
  for (idx = 0; idx < kh_end(table); idx++) {
    if (!kh_exist(table, idx)) {
      continue;
    }
    if (MRB_METHOD_UNDEF_P(kh_value(table, idx))) {
      continue; /* skip undef'd entries */
    }
    kh_put(st, mrb, set, kh_key(table, idx));
  }
}
/* Collects the method symbols of `klass` into a new array,
 * deduplicated through a temporary khash set.
 *   recur - also walk superclasses / included modules; when FALSE the
 *           walk stops after the class itself (iclasses and singleton
 *           classes are still traversed)
 *   obj   - unused
 * When not recursing on a prepended class, the walk first rewinds to
 * the class origin so prepended modules are skipped.
 * NOTE(review): result order is khash iteration order, not method
 * definition order. */
mrb_value
mrb_class_instance_method_list(mrb_state *mrb, mrb_bool recur, struct RClass* klass, int obj)
{
  khint_t i;
  mrb_value ary;
  mrb_bool prepended = FALSE;
  struct RClass* oldklass;
  khash_t(st)* set = kh_init(st, mrb);

  if (!recur && (klass->flags & MRB_FLAG_IS_PREPENDED)) {
    MRB_CLASS_ORIGIN(klass); /* rewind past prepended iclasses */
    prepended = TRUE;
  }

  oldklass = 0;
  while (klass && (klass != oldklass)) {
    method_entry_loop(mrb, klass, set);
    if ((klass->tt == MRB_TT_ICLASS && !prepended) ||
        (klass->tt == MRB_TT_SCLASS)) {
      /* always continue through iclasses and singleton classes */
    }
    else {
      if (!recur) break;
    }
    oldklass = klass;
    klass = klass->super;
  }

  /* dump the set into an array of symbols */
  ary = mrb_ary_new_capa(mrb, kh_size(set));
  for (i=0;i<kh_end(set);i++) {
    if (kh_exist(set, i)) {
      mrb_ary_push(mrb, ary, mrb_symbol_value(kh_key(set, i)));
    }
  }
  kh_destroy(st, mrb, set);

  return ary;
}
/* Collects method symbols defined on obj's singleton class (and, when
 * `recur`, on further singleton-class/iclass ancestors) into a new
 * array of symbols, deduplicated through a temporary khash set. */
static mrb_value
mrb_obj_singleton_methods(mrb_state *mrb, mrb_bool recur, mrb_value obj)
{
  khint_t i;
  mrb_value ary;
  struct RClass* klass;
  khash_t(st)* set = kh_init(st, mrb);

  klass = mrb_class(mrb, obj);

  if (klass && (klass->tt == MRB_TT_SCLASS)) {
      method_entry_loop(mrb, klass, set);
      klass = klass->super;
  }
  if (recur) {
      /* keep walking while ancestors are singleton classes or iclasses */
      while (klass && ((klass->tt == MRB_TT_SCLASS) || (klass->tt == MRB_TT_ICLASS))) {
        method_entry_loop(mrb, klass, set);
        klass = klass->super;
      }
  }

  /* dump the set into an array of symbols */
  ary = mrb_ary_new(mrb);
  for (i=0;i<kh_end(set);i++) {
    if (kh_exist(set, i)) {
      mrb_ary_push(mrb, ary, mrb_symbol_value(kh_key(set, i)));
    }
  }
  kh_destroy(st, mrb, set);

  return ary;
}
/* Shared implementation behind the *_methods reflection methods.
 * `flag` (visibility) is accepted for API symmetry but ignored:
 * mruby does not implement method visibility. */
static mrb_value
mrb_obj_methods(mrb_state *mrb, mrb_bool recur, mrb_value obj, mrb_method_flag_t flag)
{
  struct RClass * const klass = mrb_class(mrb, obj);
  return mrb_class_instance_method_list(mrb, recur, klass, 0);
}
/* 15.3.1.3.31 */
/*
 *  call-seq:
 *     obj.methods    -> array
 *
 *  Returns the names of the methods publicly accessible on
 *  <i>obj</i>, including inherited ones unless `false` is passed.
 */
static mrb_value
mrb_obj_methods_m(mrb_state *mrb, mrb_value self)
{
  mrb_bool include_ancestors = TRUE;

  mrb_get_args(mrb, "|b", &include_ancestors);
  return mrb_obj_methods(mrb, include_ancestors, self, (mrb_method_flag_t)0); /* everything but private */
}
/* 15.3.1.3.32 */
/*
 *  call_seq:
 *     nil.nil?               -> true
 *     <anything_else>.nil?   -> false
 *
 *  Shared stub that unconditionally answers false; bound to
 *  predicates whose receiver can never satisfy them (e.g.
 *  Object#nil? for non-nil receivers).
 */
static mrb_value
mrb_false(mrb_state *mrb, mrb_value self)
{
  return mrb_bool_value(FALSE);
}
/* 15.3.1.3.36 */
/*
 *  call-seq:
 *     obj.private_methods(all=true)   -> array
 *
 *  Returns the private methods accessible to <i>obj</i>; with
 *  `false`, only those of the receiver itself.
 */
static mrb_value
mrb_obj_private_methods(mrb_state *mrb, mrb_value self)
{
  mrb_bool include_ancestors = TRUE;

  mrb_get_args(mrb, "|b", &include_ancestors);
  return mrb_obj_methods(mrb, include_ancestors, self, NOEX_PRIVATE); /* visibility flag currently unused */
}
/* 15.3.1.3.37 */
/*
 *  call-seq:
 *     obj.protected_methods(all=true)   -> array
 *
 *  Returns the protected methods accessible to <i>obj</i>; with
 *  `false`, only those of the receiver itself.
 */
static mrb_value
mrb_obj_protected_methods(mrb_state *mrb, mrb_value self)
{
  mrb_bool include_ancestors = TRUE;

  mrb_get_args(mrb, "|b", &include_ancestors);
  return mrb_obj_methods(mrb, include_ancestors, self, NOEX_PROTECTED); /* visibility flag currently unused */
}
/* 15.3.1.3.38 */
/*
 *  call-seq:
 *     obj.public_methods(all=true)   -> array
 *
 *  Returns the public methods accessible to <i>obj</i>; with
 *  `false`, only those of the receiver itself.
 */
static mrb_value
mrb_obj_public_methods(mrb_state *mrb, mrb_value self)
{
  mrb_bool include_ancestors = TRUE;

  mrb_get_args(mrb, "|b", &include_ancestors);
  return mrb_obj_methods(mrb, include_ancestors, self, NOEX_PUBLIC); /* visibility flag currently unused */
}
/* 15.3.1.2.12 */
/* 15.3.1.3.40 */
/*
 *  call-seq:
 *     raise
 *     raise(string)
 *     raise(exception [, string])
 *
 *  With no argument raises a RuntimeError; a lone String raises a
 *  RuntimeError with that message; otherwise the first argument
 *  names the exception class and the optional second argument is
 *  the message. Exceptions are caught by the rescue clause of
 *  begin...end blocks.
 *
 *     raise "Failed to create socket"
 *     raise ArgumentError, "No parameters"
 */
MRB_API mrb_value
mrb_f_raise(mrb_state *mrb, mrb_value self)
{
  mrb_value args[2];
  mrb_int count;

  count = mrb_get_args(mrb, "|oo", &args[0], &args[1]);
  if (count == 0) {
    /* bare `raise` */
    mrb_raise(mrb, E_RUNTIME_ERROR, "");
  }
  if (count == 1 && mrb_string_p(args[0])) {
    /* raise("msg") is shorthand for raise(RuntimeError, "msg") */
    args[1] = args[0];
    args[0] = mrb_obj_value(E_RUNTIME_ERROR);
    count = 2;
  }
  mrb_exc_raise(mrb, mrb_make_exception(mrb, count, args));
  return mrb_nil_value();  /* not reached */
}
/* Kernel#class_defined?(name) -> true/false
 * Checks whether a class/module with the given (string) name exists. */
static mrb_value
mrb_krn_class_defined(mrb_state *mrb, mrb_value self)
{
  mrb_value name;

  mrb_get_args(mrb, "S", &name);
  return mrb_bool_value(mrb_class_defined(mrb, RSTRING_PTR(name)));
}
/* 15.3.1.3.41 */
/*
* call-seq:
* obj.remove_instance_variable(symbol) -> obj
*
* Removes the named instance variable from <i>obj</i>, returning that
* variable's value.
*
* class Dummy
* attr_reader :var
* def initialize
* @var = 99
* end
* def remove
* remove_instance_variable(:@var)
* end
* end
* d = Dummy.new
* d.var #=> 99
* d.remove #=> 99
* d.var #=> nil
*/
/* 15.3.1.3.41  Object#remove_instance_variable(symbol)
 * Removes the named instance variable and returns its former value;
 * raises NameError when the variable is not defined. */
static mrb_value
mrb_obj_remove_instance_variable(mrb_state *mrb, mrb_value self)
{
  mrb_sym ivar_name;
  mrb_value removed;

  mrb_get_args(mrb, "n", &ivar_name);
  mrb_iv_check(mrb, ivar_name);
  removed = mrb_iv_remove(mrb, self, ivar_name);
  if (mrb_undef_p(removed)) {
    mrb_name_error(mrb, ivar_name, "instance variable %S not defined", mrb_sym2str(mrb, ivar_name));
  }
  return removed;
}
/*
 * Default handling for a missing method: raises NoMethodError carrying
 * the method name and the argument list of the failed call.
 * `self` is accepted for API symmetry but not used here.
 */
void
mrb_method_missing(mrb_state *mrb, mrb_sym name, mrb_value self, mrb_value args)
{
  mrb_no_method_error(mrb, name, args, "undefined method '%S'", mrb_sym2str(mrb, name));
}
/* 15.3.1.3.30 */
/*
* call-seq:
* obj.method_missing(symbol [, *args] ) -> result
*
* Invoked by Ruby when <i>obj</i> is sent a message it cannot handle.
* <i>symbol</i> is the symbol for the method called, and <i>args</i>
* are any arguments that were passed to it. By default, the interpreter
* raises an error when this method is called. However, it is possible
* to override the method to provide more dynamic behavior.
* If it is decided that a particular method should not be handled, then
* <i>super</i> should be called, so that ancestors can pick up the
* missing method.
* The example below creates
* a class <code>Roman</code>, which responds to methods with names
* consisting of roman numerals, returning the corresponding integer
* values.
*
* class Roman
* def romanToInt(str)
* # ...
* end
* def method_missing(methId)
* str = methId.id2name
* romanToInt(str)
* end
* end
*
* r = Roman.new
* r.iv #=> 4
* r.xxiii #=> 23
* r.mm #=> 2000
*/
#ifdef MRB_DEFAULT_METHOD_MISSING
/*
 * 15.3.1.3.30  Object#method_missing — Ruby-visible entry point.
 * Unpacks the missing method's name plus the remaining arguments and
 * delegates to mrb_method_missing(), which raises NoMethodError.
 */
static mrb_value
mrb_obj_missing(mrb_state *mrb, mrb_value mod)
{
  mrb_sym name;
  mrb_value *a;
  mrb_int alen;

  /* "n*!": a symbol followed by the rest of the arguments (may be empty). */
  mrb_get_args(mrb, "n*!", &name, &a, &alen);
  mrb_method_missing(mrb, name, mod, mrb_ary_new_from_values(mrb, alen, a));
  /* not reached */
  return mrb_nil_value();
}
#endif

/*
 * Thin wrapper around mrb_respond_to().
 * NOTE: the `pub` visibility flag is currently ignored, so private and
 * public lookups behave identically here.
 */
static inline mrb_bool
basic_obj_respond_to(mrb_state *mrb, mrb_value obj, mrb_sym id, int pub)
{
  return mrb_respond_to(mrb, obj, id);
}
/* 15.3.1.3.43 */
/*
* call-seq:
* obj.respond_to?(symbol, include_private=false) -> true or false
*
* Returns +true+ if _obj_ responds to the given
* method. Private methods are included in the search only if the
* optional second parameter evaluates to +true+.
*
* If the method is not implemented,
* as Process.fork on Windows, File.lchmod on GNU/Linux, etc.,
* false is returned.
*
* If the method is not defined, <code>respond_to_missing?</code>
* method is called and the result is returned.
*/
/* 15.3.1.3.43  Object#respond_to?(symbol, include_private=false)
 *
 * Returns true if the receiver responds to the named method.  Accepts a
 * symbol or a string; other objects are converted via to_str when
 * possible, otherwise TypeError is raised.  When the direct lookup fails
 * the respond_to_missing? hook, if defined, decides the result.
 *
 * Fix: `id` is now zero-initialized.  Previously it was assigned only on
 * some paths and left indeterminate when the name could not be interned;
 * correctness relied on the respond_to_p flag never letting the
 * unassigned value be read, which is fragile and triggers
 * -Wmaybe-uninitialized.
 */
static mrb_value
obj_respond_to(mrb_state *mrb, mrb_value self)
{
  mrb_value mid;
  mrb_sym id = 0, rtm_id;
  mrb_bool priv = FALSE, respond_to_p = TRUE;

  mrb_get_args(mrb, "o|b", &mid, &priv);
  if (mrb_symbol_p(mid)) {
    id = mrb_symbol(mid);
  }
  else {
    mrb_value tmp;
    if (mrb_string_p(mid)) {
      tmp = mrb_check_intern_str(mrb, mid);
    }
    else {
      tmp = mrb_check_string_type(mrb, mid);
      if (mrb_nil_p(tmp)) {
        tmp = mrb_inspect(mrb, mid);
        mrb_raisef(mrb, E_TYPE_ERROR, "%S is not a symbol", tmp);
      }
      tmp = mrb_check_intern_str(mrb, tmp);
    }
    if (mrb_nil_p(tmp)) {
      /* Name was never interned, so nothing can respond to it. */
      respond_to_p = FALSE;
    }
    else {
      id = mrb_symbol(tmp);
    }
  }
  if (respond_to_p) {
    respond_to_p = basic_obj_respond_to(mrb, self, id, !priv);
  }
  if (!respond_to_p) {
    /* Fall back to the respond_to_missing? hook when available. */
    rtm_id = mrb_intern_lit(mrb, "respond_to_missing?");
    if (basic_obj_respond_to(mrb, self, rtm_id, !priv)) {
      mrb_value args[2], v;
      args[0] = mid;
      args[1] = mrb_bool_value(priv);
      v = mrb_funcall_argv(mrb, self, rtm_id, 2, args);
      return mrb_bool_value(mrb_bool(v));
    }
  }
  return mrb_bool_value(respond_to_p);
}
/* 15.3.1.3.45 */
/*
* call-seq:
* obj.singleton_methods(all=true) -> array
*
* Returns an array of the names of singleton methods for <i>obj</i>.
* If the optional <i>all</i> parameter is true, the list will include
* methods in modules included in <i>obj</i>.
* Only public and protected singleton methods are returned.
*
* module Other
* def three() end
* end
*
* class Single
* def Single.four() end
* end
*
* a = Single.new
*
* def a.one()
* end
*
* class << a
* include Other
* def two()
* end
* end
*
* Single.singleton_methods #=> [:four]
* a.singleton_methods(false) #=> [:two, :one]
* a.singleton_methods #=> [:two, :one, :three]
*/
/* 15.3.1.3.45  Object#singleton_methods(all=true)
 * Returns the names of the receiver's singleton methods; when `all` is
 * true the list includes methods from modules included in the singleton
 * class. */
static mrb_value
mrb_obj_singleton_methods_m(mrb_state *mrb, mrb_value self)
{
  mrb_bool include_modules = TRUE;

  mrb_get_args(mrb, "|b", &include_modules);
  return mrb_obj_singleton_methods(mrb, include_modules, self);
}
/*
 * Object#define_singleton_method(name) { ... }
 *
 * Defines `name` on the receiver's singleton class from the given block.
 * The block is copied into a fresh RProc and marked MRB_PROC_STRICT so
 * it behaves as a method body.  Raises ArgumentError when no block is
 * supplied.  Returns the method name as a symbol.
 */
static mrb_value
mod_define_singleton_method(mrb_state *mrb, mrb_value self)
{
  struct RProc *p;
  mrb_method_t m;
  mrb_sym mid;
  mrb_value blk = mrb_nil_value();

  /* "n&": method-name symbol plus the block argument. */
  mrb_get_args(mrb, "n&", &mid, &blk);
  if (mrb_nil_p(blk)) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given");
  }
  p = (struct RProc*)mrb_obj_alloc(mrb, MRB_TT_PROC, mrb->proc_class);
  /* Copy so the caller's proc object is not mutated by the flag below. */
  mrb_proc_copy(p, mrb_proc_ptr(blk));
  p->flags |= MRB_PROC_STRICT;
  MRB_METHOD_FROM_PROC(m, p);
  mrb_define_method_raw(mrb, mrb_class_ptr(mrb_singleton_class(mrb, self)), mid, m);
  return mrb_symbol_value(mid);
}
/* Internal helper (__case_eqq): splats the receiver into an array and
 * returns true as soon as any element matches the argument under `===`,
 * false otherwise.  Used to implement case/when dispatch. */
static mrb_value
mrb_obj_ceqq(mrb_state *mrb, mrb_value self)
{
  mrb_value needle;
  mrb_int idx, count;
  mrb_sym eqq_sym = mrb_intern_lit(mrb, "===");
  mrb_value candidates = mrb_ary_splat(mrb, self);

  mrb_get_args(mrb, "o", &needle);
  count = RARRAY_LEN(candidates);
  for (idx = 0; idx < count; idx++) {
    mrb_value matched = mrb_funcall_argv(mrb, mrb_ary_entry(candidates, idx),
                                         eqq_sym, 1, &needle);
    if (mrb_test(matched)) {
      return mrb_true_value();
    }
  }
  return mrb_false_value();
}
/* 15.3.1.2.7 */
/*
* call-seq:
* local_variables -> array
*
* Returns the names of local variables in the current scope.
*
* [mruby limitation]
* If variable symbol information was stripped out from
* compiled binary files using `mruby-strip -l`, this
* method always returns an empty array.
*/
/*
 * 15.3.1.2.7 / 15.3.1.3.28  Kernel#local_variables
 *
 * Collects local-variable names from the calling proc and its enclosing
 * (upper) procs, deduplicating via a hash, and returns them as an array
 * of symbols.  Returns an empty array when the caller is C code or when
 * symbol information was stripped (irep->lv == NULL).
 *
 * Fix (NULL dereference, CWE-476): `mrb->c->ci[-1].proc` can be NULL,
 * and `proc->upper` can be NULL at the end of the chain; both were
 * previously dereferenced unconditionally (MRB_PROC_CFUNC_P(proc) and
 * proc->c).  Guard both before use.
 */
static mrb_value
mrb_local_variables(mrb_state *mrb, mrb_value self)
{
  struct RProc *proc;
  mrb_irep *irep;
  mrb_value vars;
  size_t i;

  proc = mrb->c->ci[-1].proc;

  /* No Ruby proc on the caller frame, or a C function: no Ruby locals. */
  if (!proc || MRB_PROC_CFUNC_P(proc)) {
    return mrb_ary_new(mrb);
  }
  vars = mrb_hash_new(mrb);
  while (proc) {
    if (MRB_PROC_CFUNC_P(proc)) break;
    irep = proc->body.irep;
    if (!irep->lv) break;      /* symbols stripped (mruby-strip -l) */
    /* nlocals includes the implicit slot, hence i + 1 < nlocals. */
    for (i = 0; i + 1 < irep->nlocals; ++i) {
      if (irep->lv[i].name) {
        mrb_hash_set(mrb, vars, mrb_symbol_value(irep->lv[i].name), mrb_true_value());
      }
    }
    if (!MRB_PROC_ENV_P(proc)) break;
    proc = proc->upper;
    /* Stop at the end of the chain or at a proc with no target class. */
    if (!proc || !proc->c) break;
  }

  return mrb_hash_keys(mrb, vars);
}
mrb_value mrb_obj_equal_m(mrb_state *mrb, mrb_value);
/*
 * Creates the Kernel module, registers its class and instance methods,
 * and mixes it into Object.  Trailing comments name the ISO/IEC 30170
 * section each method implements; the bare `;` marks a section number
 * that is intentionally left unimplemented here.
 */
void
mrb_init_kernel(mrb_state *mrb)
{
  struct RClass *krn;

  mrb->kernel_module = krn = mrb_define_module(mrb, "Kernel");                                                    /* 15.3.1 */
  mrb_define_class_method(mrb, krn, "block_given?",         mrb_f_block_given_p_m,           MRB_ARGS_NONE());    /* 15.3.1.2.2  */
  mrb_define_class_method(mrb, krn, "global_variables",     mrb_f_global_variables,          MRB_ARGS_NONE());    /* 15.3.1.2.4  */
  mrb_define_class_method(mrb, krn, "iterator?",            mrb_f_block_given_p_m,           MRB_ARGS_NONE());    /* 15.3.1.2.5  */
  mrb_define_class_method(mrb, krn, "local_variables",      mrb_local_variables,             MRB_ARGS_NONE());    /* 15.3.1.2.7  */
;     /* 15.3.1.2.11 */
  mrb_define_class_method(mrb, krn, "raise",                mrb_f_raise,                     MRB_ARGS_OPT(2));    /* 15.3.1.2.12 */

  mrb_define_method(mrb, krn, "singleton_class",            mrb_singleton_class,             MRB_ARGS_NONE());
  mrb_define_method(mrb, krn, "===",                        mrb_equal_m,                     MRB_ARGS_REQ(1));    /* 15.3.1.3.2  */
  mrb_define_method(mrb, krn, "block_given?",               mrb_f_block_given_p_m,           MRB_ARGS_NONE());    /* 15.3.1.3.6  */
  mrb_define_method(mrb, krn, "class",                      mrb_obj_class_m,                 MRB_ARGS_NONE());    /* 15.3.1.3.7  */
  mrb_define_method(mrb, krn, "clone",                      mrb_obj_clone,                   MRB_ARGS_NONE());    /* 15.3.1.3.8  */
  mrb_define_method(mrb, krn, "dup",                        mrb_obj_dup,                     MRB_ARGS_NONE());    /* 15.3.1.3.9  */
  mrb_define_method(mrb, krn, "eql?",                       mrb_obj_equal_m,                 MRB_ARGS_REQ(1));    /* 15.3.1.3.10 */
  mrb_define_method(mrb, krn, "equal?",                     mrb_obj_equal_m,                 MRB_ARGS_REQ(1));    /* 15.3.1.3.11 */
  mrb_define_method(mrb, krn, "extend",                     mrb_obj_extend_m,                MRB_ARGS_ANY());     /* 15.3.1.3.13 */
  mrb_define_method(mrb, krn, "freeze",                     mrb_obj_freeze,                  MRB_ARGS_NONE());
  mrb_define_method(mrb, krn, "frozen?",                    mrb_obj_frozen,                  MRB_ARGS_NONE());
  mrb_define_method(mrb, krn, "global_variables",           mrb_f_global_variables,          MRB_ARGS_NONE());    /* 15.3.1.3.14 */
  mrb_define_method(mrb, krn, "hash",                       mrb_obj_hash,                    MRB_ARGS_NONE());    /* 15.3.1.3.15 */
  mrb_define_method(mrb, krn, "initialize_copy",            mrb_obj_init_copy,               MRB_ARGS_REQ(1));    /* 15.3.1.3.16 */
  mrb_define_method(mrb, krn, "inspect",                    mrb_obj_inspect,                 MRB_ARGS_NONE());    /* 15.3.1.3.17 */
  mrb_define_method(mrb, krn, "instance_of?",               obj_is_instance_of,              MRB_ARGS_REQ(1));    /* 15.3.1.3.19 */
  mrb_define_method(mrb, krn, "instance_variable_defined?", mrb_obj_ivar_defined,            MRB_ARGS_REQ(1));    /* 15.3.1.3.20 */
  mrb_define_method(mrb, krn, "instance_variable_get",      mrb_obj_ivar_get,                MRB_ARGS_REQ(1));    /* 15.3.1.3.21 */
  mrb_define_method(mrb, krn, "instance_variable_set",      mrb_obj_ivar_set,                MRB_ARGS_REQ(2));    /* 15.3.1.3.22 */
  mrb_define_method(mrb, krn, "instance_variables",         mrb_obj_instance_variables,      MRB_ARGS_NONE());    /* 15.3.1.3.23 */
  mrb_define_method(mrb, krn, "is_a?",                      mrb_obj_is_kind_of_m,            MRB_ARGS_REQ(1));    /* 15.3.1.3.24 */
  mrb_define_method(mrb, krn, "iterator?",                  mrb_f_block_given_p_m,           MRB_ARGS_NONE());    /* 15.3.1.3.25 */
  mrb_define_method(mrb, krn, "kind_of?",                   mrb_obj_is_kind_of_m,            MRB_ARGS_REQ(1));    /* 15.3.1.3.26 */
  mrb_define_method(mrb, krn, "local_variables",            mrb_local_variables,             MRB_ARGS_NONE());    /* 15.3.1.3.28 */
#ifdef MRB_DEFAULT_METHOD_MISSING
  mrb_define_method(mrb, krn, "method_missing",             mrb_obj_missing,                 MRB_ARGS_ANY());     /* 15.3.1.3.30 */
#endif
  mrb_define_method(mrb, krn, "methods",                    mrb_obj_methods_m,               MRB_ARGS_OPT(1));    /* 15.3.1.3.31 */
  mrb_define_method(mrb, krn, "nil?",                       mrb_false,                       MRB_ARGS_NONE());    /* 15.3.1.3.32 */
  mrb_define_method(mrb, krn, "object_id",                  mrb_obj_id_m,                    MRB_ARGS_NONE());    /* 15.3.1.3.33 */
  mrb_define_method(mrb, krn, "private_methods",            mrb_obj_private_methods,         MRB_ARGS_OPT(1));    /* 15.3.1.3.36 */
  mrb_define_method(mrb, krn, "protected_methods",          mrb_obj_protected_methods,       MRB_ARGS_OPT(1));    /* 15.3.1.3.37 */
  mrb_define_method(mrb, krn, "public_methods",             mrb_obj_public_methods,          MRB_ARGS_OPT(1));    /* 15.3.1.3.38 */
  mrb_define_method(mrb, krn, "raise",                      mrb_f_raise,                     MRB_ARGS_ANY());     /* 15.3.1.3.40 */
  mrb_define_method(mrb, krn, "remove_instance_variable",   mrb_obj_remove_instance_variable,MRB_ARGS_REQ(1));    /* 15.3.1.3.41 */
  mrb_define_method(mrb, krn, "respond_to?",                obj_respond_to,                  MRB_ARGS_ANY());     /* 15.3.1.3.43 */
  mrb_define_method(mrb, krn, "send",                       mrb_f_send,                      MRB_ARGS_ANY());     /* 15.3.1.3.44 */
  mrb_define_method(mrb, krn, "singleton_methods",          mrb_obj_singleton_methods_m,     MRB_ARGS_OPT(1));    /* 15.3.1.3.45 */
  mrb_define_method(mrb, krn, "define_singleton_method",    mod_define_singleton_method,     MRB_ARGS_ANY());
  mrb_define_method(mrb, krn, "to_s",                       mrb_any_to_s,                    MRB_ARGS_NONE());    /* 15.3.1.3.46 */
  mrb_define_method(mrb, krn, "__case_eqq",                 mrb_obj_ceqq,                    MRB_ARGS_REQ(1));    /* internal */
  mrb_define_method(mrb, krn, "class_defined?",             mrb_krn_class_defined,           MRB_ARGS_REQ(1));

  /* Kernel is mixed into Object so its methods are globally available. */
  mrb_include_module(mrb, mrb->object_class, mrb->kernel_module);
  mrb_alias_method(mrb, mrb->module_class, mrb_intern_lit(mrb, "dup"), mrb_intern_lit(mrb, "clone"));
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_176_0 |
crossvul-cpp_data_bad_5488_2 | 404: Not Found | ./CrossVul/dataset_final_sorted/CWE-476/c/bad_5488_2 |
crossvul-cpp_data_bad_624_0 | /*
* TUN - Universal TUN/TAP device driver.
* Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
*/
/*
* Changes:
*
* Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
* Add TUNSETLINK ioctl to set the link encapsulation
*
* Mark Smith <markzzzsmith@yahoo.com.au>
* Use eth_random_addr() for tap MAC address.
*
* Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
* Fixes in packet dropping, queue length setting and queue wakeup.
* Increased default tx queue length.
* Added ethtool API.
* Minor cleanups
*
* Daniel Podlejski <underley@underley.eu.org>
* Modifications for 2.3.99-pre5 kernel.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "tun"
#define DRV_VERSION "1.6"
#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
#define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/uaccess.h>
/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */
#ifdef TUN_DEBUG
static int debug;
#define tun_debug(level, tun, fmt, args...) \
do { \
if (tun->debug) \
netdev_printk(level, tun->dev, fmt, ##args); \
} while (0)
#define DBG1(level, fmt, args...) \
do { \
if (debug == 2) \
printk(level fmt, ##args); \
} while (0)
#else
#define tun_debug(level, tun, fmt, args...) \
do { \
if (0) \
netdev_printk(level, tun->dev, fmt, ##args); \
} while (0)
#define DBG1(level, fmt, args...) \
do { \
if (0) \
printk(level fmt, ##args); \
} while (0)
#endif
#define TUN_HEADROOM 256
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
/* TUN device flags */
/* IFF_ATTACH_QUEUE is never stored in device flags,
* overload it to mean fasync when stored there.
*/
#define TUN_FASYNC IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE 0x80000000
#define TUN_VNET_BE 0x40000000
#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
IFF_MULTI_QUEUE)
#define GOODCOPY_LEN 128
#define FLT_EXACT_COUNT 8
/* Per-device TAP MAC filter: up to FLT_EXACT_COUNT exact destination
 * addresses plus a 64-bit hash mask for additional multicast addresses.
 * See update_filter()/run_filter() below. */
struct tap_filter {
	unsigned int    count;    /* Number of addrs. Zero means disabled */
	u32             mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to max number of VCPUs in guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096

#define TUN_FLOW_EXPIRE (3 * HZ)

/* Per-CPU packet/byte counters; 64-bit fields are protected by syncp on
 * 32-bit hosts, the u32 error counters are updated without it. */
struct tun_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 rx_dropped;
	u32 tx_dropped;
	u32 rx_frame_errors;
};

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and tap_filter)
 * to serve as one transmit queue for tuntap device. The sock_fprog and
 * tap_filter were kept in tun_struct since they were used for filtering for the
 * netdevice not for a specific queue (at least I didn't see the requirement for
 * this).
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct socket_wq wq;
	struct tun_struct __rcu *tun;  /* NULL while detached from a device */
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;       /* valid while attached */
		unsigned int ifindex;  /* requested ifindex before attach */
	};
	struct list_head next;         /* link in tun->disabled */
	struct tun_struct *detached;   /* set while on the disabled list */
	struct skb_array tx_array;     /* per-queue ring of outgoing skbs */
};

/* One entry of the receive flow table: remembers which queue last saw a
 * given rxhash so transmits can be steered back to it. */
struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;          /* hash recorded for RPS, see below */
	int queue_index;
	unsigned long updated;   /* jiffies of last use, for aging */
};

#define TUN_NUM_FLOW_ENTRIES 1024

/* Since the socket were moved to tun_file, to preserve the behavior of persist
 * device, socket filter, sndbuf and vnet header size were restore when the
 * file were attached to a persist device.
 */
struct tun_struct {
	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
	unsigned int            numqueues;   /* currently attached queues */
	unsigned int 		flags;
	kuid_t			owner;
	kgid_t			group;

	struct net_device	*dev;
	netdev_features_t	set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
			  NETIF_F_TSO6)

	int			align;
	int			vnet_hdr_sz;
	int			sndbuf;
	struct tap_filter	txflt;
	struct sock_fprog	fprog;
	/* protected by rtnl lock */
	bool			filter_attached;
#ifdef TUN_DEBUG
	int debug;
#endif
	spinlock_t lock;                     /* guards the flow table below */
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;     /* ages out idle flow entries */
	unsigned long ageing_time;
	unsigned int numdisabled;            /* queues on the disabled list */
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	struct tun_pcpu_stats __percpu *pcpu_stats;
	struct bpf_prog __rcu *xdp_prog;     /* attached XDP program, or NULL */
};
#ifdef CONFIG_TUN_VNET_CROSS_LE
/* With cross-endian support, TUN_VNET_BE overrides the legacy default
 * byte order for the virtio-net header. */
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

/* TUNGETVNETBE: report whether big-endian vnet headers are forced. */
static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

/* TUNSETVNETBE: force or clear big-endian vnet headers. */
static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
/* Without CONFIG_TUN_VNET_CROSS_LE the BE ioctls are rejected. */
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

/* Effective byte order of the vnet header for this device. */
static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

/* Convert a __virtio16 from the vnet header to host order. */
static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

/* Convert a host-order u16 to the vnet header's byte order. */
static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}
/* Map an rxhash onto a flow-table bucket index.
 *
 * Fix (consistency): the mask was the magic constant 0x3ff, silently
 * duplicating TUN_NUM_FLOW_ENTRIES - 1 (1024 - 1).  Deriving it from the
 * macro keeps the hash in sync if the table size ever changes;
 * TUN_NUM_FLOW_ENTRIES must remain a power of two.
 */
static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & (TUN_NUM_FLOW_ENTRIES - 1);
}
/* Look up a flow entry by exact rxhash within one hash bucket.
 * Traverses the bucket under RCU; returns NULL when nothing matches. */
static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *entry;
	struct tun_flow_entry *found = NULL;

	hlist_for_each_entry_rcu(entry, head, hash_link) {
		if (entry->rxhash == rxhash) {
			found = entry;
			break;
		}
	}
	return found;
}
/* Allocate and insert a new flow entry into the given bucket.
 * Both visible callers run with tun->lock held; returns NULL on
 * allocation failure (GFP_ATOMIC). */
static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
			  rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

/* Unlink a flow entry and free it after an RCU grace period.
 * All visible callers hold tun->lock. */
static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
		  e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}
/* Remove every entry from the flow table (all buckets), under tun->lock. */
static void tun_flow_flush(struct tun_struct *tun)
{
	int bucket;

	spin_lock_bh(&tun->lock);
	for (bucket = 0; bucket < TUN_NUM_FLOW_ENTRIES; bucket++) {
		struct tun_flow_entry *flow;
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(flow, tmp, &tun->flows[bucket], hash_link)
			tun_flow_delete(tun, flow);
	}
	spin_unlock_bh(&tun->lock);
}
/* Remove every flow entry that steers traffic to the given queue index,
 * e.g. when that queue is being detached.  Runs under tun->lock. */
static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int bucket;

	spin_lock_bh(&tun->lock);
	for (bucket = 0; bucket < TUN_NUM_FLOW_ENTRIES; bucket++) {
		struct tun_flow_entry *flow;
		struct hlist_node *tmp;

		hlist_for_each_entry_safe(flow, tmp, &tun->flows[bucket], hash_link) {
			if (flow->queue_index == queue_index)
				tun_flow_delete(tun, flow);
		}
	}
	spin_unlock_bh(&tun->lock);
}
/* Timer callback: age out flow entries that have not been updated within
 * tun->ageing_time, and re-arm the timer for the earliest remaining
 * expiry while any entries are left. */
static void tun_flow_cleanup(unsigned long data)
{
	struct tun_struct *tun = (struct tun_struct *)data;
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			count++;
			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies))
				tun_flow_delete(tun, e);
			else if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	/* Only re-arm while entries remain; a new entry re-arms in
	 * tun_flow_update(). */
	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock_bh(&tun->lock);
}
/* Record (on the receive path) that packets with this rxhash are being
 * consumed by tfile's queue: refresh an existing entry, or create one
 * under tun->lock, arming the GC timer if needed.  No-op for a zero hash
 * or when only one queue is attached. */
static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	if (!rxhash)
		return;
	else
		head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	/* We may get a very small possibility of OOO during switching, not
	 * worth to optimize.*/
	if (tun->numqueues == 1 || tfile->detached)
		goto unlock;

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		e->queue_index = queue_index;
		e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		/* Re-check under the lock before inserting. */
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

unlock:
	rcu_read_unlock();
}
/**
 * Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	/* Write only on change to avoid dirtying the cache line. */
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash first. The reason that
 * we do not check rxq no. is because some cards(e.g 82599), chooses
 * the rxq based on the txq where the last packet of the flow comes. As
 * the userspace application move between processors, we may get a
 * different rxq no. here. If we could not get rxhash, then we would
 * hope the rxq no. may help here.
 */
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    void *accel_priv, select_queue_fallback_t fallback)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	rcu_read_lock();
	numqueues = ACCESS_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	if (txq) {
		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
		if (e) {
			tun_flow_save_rps_rxhash(e, txq);
			txq = e->queue_index;
		} else
			/* use multiply and shift instead of expensive divide */
			txq = ((u64)txq * numqueues) >> 32;
	} else if (likely(skb_rx_queue_recorded(skb))) {
		/* Fall back to the recorded rx queue, wrapped into range. */
		txq = skb_get_rx_queue(skb);
		while (unlikely(txq >= numqueues))
			txq -= numqueues;
	}

	rcu_read_unlock();
	return txq;
}
/* Permission check: true when the caller neither matches the device's
 * owner uid/group nor holds CAP_NET_ADMIN in the device's netns. */
static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

/* Publish the current queue count to the net core (tx and rx). */
static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

/* Move a queue file onto the device's disabled list (multiqueue detach
 * without releasing the file). */
static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

/* Take a queue file off the disabled list; returns the device it was
 * disabled on. */
static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}
/* Discard everything queued on a tun file: the tx ring plus the socket's
 * write and error queues. */
static void tun_queue_purge(struct tun_file *tfile)
{
	struct sk_buff *pkt;

	for (;;) {
		pkt = skb_array_consume(&tfile->tx_array);
		if (!pkt)
			break;
		kfree_skb(pkt);
	}

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}
/* Detach one queue file from its device; with clean=true the file is
 * being released entirely (refs dropped, ring cleaned up), otherwise it
 * is parked on the disabled list.  Caller holds the rtnl lock
 * (rtnl_dereference below). */
static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;
		BUG_ON(index >= tun->numqueues);

		/* Fill the hole with the last queue and renumber it. */
		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else
			tun_disable_queue(tun, tfile);

		/* Wait for in-flight RCU readers before purging. */
		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		/* Last queue gone: take a non-persistent device down. */
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			skb_array_cleanup(&tfile->tx_array);
		sock_put(&tfile->sk);
	}
}

/* rtnl-locked wrapper around __tun_detach(). */
static void tun_detach(struct tun_file *tfile, bool clean)
{
	rtnl_lock();
	__tun_detach(tfile, clean);
	rtnl_unlock();
}

/* Detach and release every queue file (attached and disabled) of the
 * device; called from ndo_uninit under rtnl. */
static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *xdp_prog = rtnl_dereference(tun->xdp_prog);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	/* Phase 1: unpublish all files and wake their readers. */
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();

	/* Phase 2: after the grace period, purge queues and drop refs. */
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		/* Drop read queue */
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (xdp_prog)
		bpf_prog_put(xdp_prog);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}
/* Attach an open tun file as a queue of the device.  Validates security
 * and multiqueue limits, re-attaches a persisted socket filter, sizes
 * the tx ring, then publishes the queue via RCU.  Returns 0 or a
 * negative errno.  Caller holds the rtnl lock (rtnl_dereference below). */
static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to persist device */
	if (!skip_filter && (tun->filter_attached == true)) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		/* NOTE(review): sk_attach_filter() returns 0 on success, so
		 * `!err` exits here on SUCCESS -- returning 0 without ever
		 * attaching the queue below -- while a failure falls
		 * through and attaches anyway.  This looks inverted;
		 * confirm intent against sk_attach_filter() semantics. */
		if (!err)
			goto out;
	}

	if (!tfile->detached &&
	    skb_array_init(&tfile->tx_array, dev->tx_queue_len, GFP_KERNEL)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
	rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;

	if (tfile->detached)
		tun_enable_queue(tfile);	/* re-enabling a parked queue */
	else
		sock_hold(&tfile->sk);		/* fresh queue: take a sock ref */

	tun_set_real_num_queues(tun);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

out:
	return err;
}
/* Resolve the device a file is attached to, taking a dev reference;
 * returns NULL when the file is detached.  Pairs with tun_put(). */
static struct tun_struct *__tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

/* Convenience wrapper: resolve the device from a struct file. */
static struct tun_struct *tun_get(struct file *file)
{
	return __tun_get(file->private_data);
}

/* Release the device reference taken by __tun_get()/tun_get(). */
static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
/* Set the hash-filter bit corresponding to a MAC address (top 6 bits of
 * the Ethernet CRC select one of 64 bits in mask[2]). */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	mask[n >> 5] |= (1 << (n & 31));
}

/* Test the hash-filter bit for a MAC address; non-zero means "maybe
 * accepted" (hash filters can false-positive, never false-negative). */
static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;
	return mask[n >> 5] & (1 << (n & 31));
}
/* TUNSETTXFILTER: rebuild the TAP MAC filter from a user-supplied
 * struct tun_filter.  The first FLT_EXACT_COUNT addresses become exact
 * matches; remaining addresses must be multicast and go into the hash
 * mask (any unicast overflow leaves the filter disabled).  Returns the
 * number of exact filters installed, or a negative errno. */
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;

free_addr:
	kfree(addr);
	return err;
}
/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *hdr = (struct ethhdr *)skb->data;
	int idx;

	/* First pass: exact destination-address matches. */
	for (idx = 0; idx < filter->count; idx++) {
		if (ether_addr_equal(hdr->h_dest, filter->addr[idx]))
			return 1;
	}

	/* Second pass: hashed match, multicast destinations only. */
	if (is_multicast_ether_addr(hdr->h_dest))
		return addr_hash_test(filter->mask, hdr->h_dest);

	return 0;
}
/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* An empty (count == 0) filter accepts everything. */
	return filter->count ? run_filter(filter, skb) : 1;
}
/* Network device part of the driver */
static const struct ethtool_ops tun_ethtool_ops;
/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	/* ndo_uninit: tear down every attached and disabled queue file. */
	tun_detach_all(dev);
}
/* Net device open (ndo_open): start all tx queues and invoke each
 * attached socket's write-space callback. */
static int tun_net_open(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int idx;

	netif_tx_start_all_queues(dev);

	for (idx = 0; idx < tun->numqueues; idx++) {
		struct tun_file *tfile = rtnl_dereference(tun->tfiles[idx]);

		tfile->socket.sk->sk_write_space(tfile->socket.sk);
	}

	return 0;
}
/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
netif_tx_stop_all_queues(dev);
return 0;
}
/* Net device start xmit.
 * Pushes the skb onto the per-queue tx ring for the userspace reader and
 * wakes it; drops the packet (NET_XMIT_DROP) when the queue is gone, a
 * filter rejects it, or the ring is full.  Runs entirely under RCU.
 */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	int txq = skb->queue_mapping;
	struct tun_file *tfile;
	u32 numqueues = 0;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);
	numqueues = ACCESS_ONCE(tun->numqueues);

	/* Drop packet if interface is not attached */
	if (txq >= numqueues)
		goto drop;

#ifdef CONFIG_RPS
	if (numqueues == 1 && static_key_false(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		if (rxhash) {
			struct tun_flow_entry *e;

			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
					  rxhash);
			if (e)
				tun_flow_save_rps_rxhash(e, rxhash);
		}
	}
#endif

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	BUG_ON(!tfile);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	/* Also honour a BPF socket filter attached via TUNATTACHFILTER. */
	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb))
		goto drop;

	/* Userspace-backed frags must be copied before queueing since the
	 * reader may hold on to the skb indefinitely.
	 */
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
		goto drop;

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time.
	 */
	skb_orphan(skb);

	nf_reset(skb);

	/* Enqueue on the per-queue ring; a full ring means drop. */
	if (skb_array_produce(&tfile->tx_array, skb))
		goto drop;

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	this_cpu_inc(tun->pcpu_stats->tx_dropped);
	skb_tx_error(skb);
	kfree_skb(skb);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}
/* ndo_set_rx_mode callback - intentionally a no-op. */
static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}
static netdev_features_t tun_net_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);
	netdev_features_t user_bits;
	netdev_features_t other_bits;

	/* User-controllable offload bits are gated by what userspace
	 * enabled via TUNSETOFFLOAD; everything else passes through.
	 */
	user_bits = features & tun->set_features;
	other_bits = features & ~TUN_USER_FEATURES;
	return user_bits | other_bits;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll callback - deliberately empty; see comment below. */
static void tun_poll_controller(struct net_device *dev)
{
	/*
	 * Tun only receives frames when:
	 * 1) the char device endpoint gets data from user space
	 * 2) the tun socket gets a sendmsg call from user space
	 * Since both of those are synchronous operations, we are guaranteed
	 * never to have pending data when we poll for it
	 * so there is nothing to do here but return.
	 * We need this though so netpoll recognizes us as an interface that
	 * supports polling, which enables bridge devices in virt setups to
	 * still use netconsole
	 */
	return;
}
#endif
/* ndo_set_rx_headroom: record the headroom to reserve on received skbs. */
static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	/* Never allow less than the generic skb padding. */
	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}
/* ndo_get_stats64: aggregate the per-CPU counters into *stats. */
static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_pcpu_stats *p;
	int i;

	for_each_possible_cpu(i) {
		u64 rxpackets, rxbytes, txpackets, txbytes;
		unsigned int start;

		p = per_cpu_ptr(tun->pcpu_stats, i);
		/* 64-bit counters are read under the u64_stats seqcount
		 * retry loop so a concurrent writer on a 32-bit arch
		 * cannot hand us a torn value.
		 */
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rxpackets = p->rx_packets;
			rxbytes = p->rx_bytes;
			txpackets = p->tx_packets;
			txbytes = p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets += rxpackets;
		stats->rx_bytes += rxbytes;
		stats->tx_packets += txpackets;
		stats->tx_bytes += txbytes;

		/* u32 counters: plain loads are already atomic. */
		rx_dropped += p->rx_dropped;
		rx_frame_errors += p->rx_frame_errors;
		tx_dropped += p->tx_dropped;
	}
	stats->rx_dropped = rx_dropped;
	stats->rx_frame_errors = rx_frame_errors;
	stats->tx_dropped = tx_dropped;
}
/* Install (or clear, when prog == NULL) the device's XDP program.
 * Called under rtnl; readers are protected by RCU.
 */
static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct bpf_prog *old_prog;

	old_prog = rtnl_dereference(tun->xdp_prog);
	/* Publish the new program before dropping our ref on the old one. */
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}
/* Report the id of the currently installed XDP program, 0 if none. */
static u32 tun_xdp_query(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	const struct bpf_prog *prog = rtnl_dereference(tun->xdp_prog);

	return prog ? prog->aux->id : 0;
}
/* ndo_xdp entry point: dispatch setup/query commands. */
static int tun_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	if (xdp->command == XDP_SETUP_PROG)
		return tun_xdp_set(dev, xdp->prog, xdp->extack);

	if (xdp->command == XDP_QUERY_PROG) {
		xdp->prog_id = tun_xdp_query(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	}

	return -EINVAL;
}
/* netdev callbacks for IFF_TUN (layer-3, no link header) devices. */
static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit = tun_net_uninit,
	.ndo_open = tun_net_open,
	.ndo_stop = tun_net_close,
	.ndo_start_xmit = tun_net_xmit,
	.ndo_fix_features = tun_net_fix_features,
	.ndo_select_queue = tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = tun_poll_controller,
#endif
	.ndo_set_rx_headroom = tun_set_headroom,
	.ndo_get_stats64 = tun_net_get_stats64,
};
/* netdev callbacks for IFF_TAP (Ethernet) devices; adds MAC address
 * handling and XDP support on top of the TUN set.
 */
static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit = tun_net_uninit,
	.ndo_open = tun_net_open,
	.ndo_stop = tun_net_close,
	.ndo_start_xmit = tun_net_xmit,
	.ndo_fix_features = tun_net_fix_features,
	.ndo_set_rx_mode = tun_net_mclist,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_select_queue = tun_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = tun_poll_controller,
#endif
	.ndo_features_check = passthru_features_check,
	.ndo_set_rx_headroom = tun_set_headroom,
	.ndo_get_stats64 = tun_net_get_stats64,
	.ndo_xdp = tun_xdp,
};
/* Initialize the per-device flow table used for automatic queue
 * selection, and arm the periodic garbage-collection timer.
 */
static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}
/* Stop the flow GC timer and free all flow entries. */
static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}
#define MIN_MTU 68
#define MAX_MTU 65535
/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;

		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		/* Random MAC until userspace assigns one. */
		eth_hw_addr_random(dev);

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}
/* Character device part */

/* Poll */
static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	struct sock *sk;
	unsigned int mask = 0;

	if (!tun)
		return POLLERR;

	sk = tfile->socket.sk;

	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");

	poll_wait(file, sk_sleep(sk), wait);

	/* Readable whenever the tx ring holds at least one packet. */
	if (!skb_array_empty(&tfile->tx_array))
		mask |= POLLIN | POLLRDNORM;

	/* Writable when the device is up and there is sndbuf space.  If
	 * not writable, arm SOCKWQ_ASYNC_NOSPACE and re-check to close the
	 * race with a concurrent sk_write_space().
	 */
	if (tun->dev->flags & IFF_UP &&
	    (sock_writeable(sk) ||
	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	      sock_writeable(sk))))
		mask |= POLLOUT | POLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = POLLERR;

	tun_put(tun);
	return mask;
}
/* prepad is the amount to reserve at front. len is length after that.
 * linear is a hint as to how much to copy (usually headers).
 * Returns a new skb charged against the queue socket's sndbuf, or an
 * ERR_PTR() on failure (-EAGAIN when noblock and no space).
 */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, 0);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	/* Account the remainder as paged (non-linear) data. */
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
/* Deliver a received skb to the stack, optionally coalescing a burst
 * (sendmsg with MSG_MORE) into batches of up to tun->rx_batched packets
 * queued on sk_write_queue before flushing them together.
 */
static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	/* Batching disabled, or end of a burst with nothing pending:
	 * hand the packet to the stack directly.
	 */
	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		/* Move the pending batch to a private list so the lock is
		 * not held while delivering to the stack.
		 */
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue)))
			netif_receive_skb(nskb);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}
/* Decide whether the fast page-frag path (tun_build_skb) may be used. */
static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	size_t need;

	/* Fast path is restricted to non-blocking, non-zerocopy TAP
	 * queues that still have the default (unlimited) sndbuf.
	 */
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP || !noblock || zerocopy)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	/* Payload + pad + shared info must fit within a single page. */
	need = SKB_DATA_ALIGN(len + TUN_RX_PAD) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	return need <= PAGE_SIZE;
}
static struct sk_buff *tun_build_skb(struct tun_struct *tun,
struct tun_file *tfile,
struct iov_iter *from,
struct virtio_net_hdr *hdr,
int len, int *skb_xdp)
{
struct page_frag *alloc_frag = ¤t->task_frag;
struct sk_buff *skb;
struct bpf_prog *xdp_prog;
int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
unsigned int delta = 0;
char *buf;
size_t copied;
bool xdp_xmit = false;
int err, pad = TUN_RX_PAD;
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog)
pad += TUN_HEADROOM;
buflen += SKB_DATA_ALIGN(len + pad);
rcu_read_unlock();
if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
return ERR_PTR(-ENOMEM);
buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
copied = copy_page_from_iter(alloc_frag->page,
alloc_frag->offset + pad,
len, from);
if (copied != len)
return ERR_PTR(-EFAULT);
/* There's a small window that XDP may be set after the check
* of xdp_prog above, this should be rare and for simplicity
* we do XDP on skb in case the headroom is not enough.
*/
if (hdr->gso_type || !xdp_prog)
*skb_xdp = 1;
else
*skb_xdp = 0;
rcu_read_lock();
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog && !*skb_xdp) {
struct xdp_buff xdp;
void *orig_data;
u32 act;
xdp.data_hard_start = buf;
xdp.data = buf + pad;
xdp.data_end = xdp.data + len;
orig_data = xdp.data;
act = bpf_prog_run_xdp(xdp_prog, &xdp);
switch (act) {
case XDP_REDIRECT:
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
err = xdp_do_redirect(tun->dev, &xdp, xdp_prog);
if (err)
goto err_redirect;
return NULL;
case XDP_TX:
xdp_xmit = true;
/* fall through */
case XDP_PASS:
delta = orig_data - xdp.data;
break;
default:
bpf_warn_invalid_xdp_action(act);
/* fall through */
case XDP_ABORTED:
trace_xdp_exception(tun->dev, xdp_prog, act);
/* fall through */
case XDP_DROP:
goto err_xdp;
}
}
skb = build_skb(buf, buflen);
if (!skb) {
rcu_read_unlock();
return ERR_PTR(-ENOMEM);
}
skb_reserve(skb, pad - delta);
skb_put(skb, len + delta);
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
if (xdp_xmit) {
skb->dev = tun->dev;
generic_xdp_tx(skb, xdp_prog);
rcu_read_lock();
return NULL;
}
rcu_read_unlock();
return skb;
err_redirect:
put_page(alloc_frag->page);
err_xdp:
rcu_read_unlock();
this_cpu_inc(tun->pcpu_stats->rx_dropped);
return NULL;
}
/* Get packet from user space buffer.
 * Parses the optional tun_pi and virtio_net headers, builds an skb
 * (fast page-frag path, zerocopy, or plain copy), runs generic XDP when
 * required, and injects the packet into the network stack.
 * Returns total_len on success or a negative errno.
 */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	struct tun_pcpu_stats *stats;
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash;
	int skb_xdp = 1;

	if (!(tun->dev->flags & IFF_UP))
		return -EIO;

	/* Optional packet-information header precedes the payload. */
	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	/* Optional virtio-net header (offload metadata). */
	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		/* hdr_len must cover the checksum region. */
		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	/* msg_control carries a ubuf_info: zerocopy was requested. */
	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied in skb, so there is
		 * enough room for skb expand head in case it is used.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* For the packet that is not easy to be processed
		 * (e.g gso or jumbo packet), we will do it at after
		 * skb was created with generic XDP routine.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		if (IS_ERR(skb)) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			return PTR_ERR(skb);
		}
		/* NULL means XDP already consumed the packet. */
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}

		skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
		if (IS_ERR(skb)) {
			if (PTR_ERR(skb) != -EAGAIN)
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
			return PTR_ERR(skb);
		}

		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);

		if (err) {
			this_cpu_inc(tun->pcpu_stats->rx_dropped);
			kfree_skb(skb);
			return -EFAULT;
		}
	}

	/* Apply the virtio offload metadata to the skb. */
	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
		kfree_skb(skb);
		return -EINVAL;
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			/* No PI header: infer protocol from the IP version. */
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				this_cpu_inc(tun->pcpu_stats->rx_dropped);
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (msg_control) {
		/* Zerocopy was requested but not used: complete it now. */
		struct ubuf_info *uarg = msg_control;

		uarg->callback(uarg, false);
	}

	skb_reset_network_header(skb);
	skb_probe_transport_header(skb, 0);

	/* Run XDP on the skb if tun_build_skb deferred it (or was unused). */
	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;

		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				return total_len;
			}
		}
		rcu_read_unlock();
	}

	rxhash = __skb_get_hash_symmetric(skb);
#ifndef CONFIG_4KSTACKS
	tun_rx_batched(tun, tfile, skb, more);
#else
	netif_rx_ni(skb);
#endif

	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(stats);

	/* Remember which queue this flow arrived on for tx steering. */
	tun_flow_update(tun, rxhash, tfile);
	return total_len;
}
/* write(2)/writev(2) on the char device: inject one packet. */
static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(file);
	ssize_t ret = -EBADFD;

	if (tun) {
		ret = tun_get_user(tun, tfile, NULL, from,
				   file->f_flags & O_NONBLOCK, false);
		tun_put(tun);
	}
	return ret;
}
/* Put packet to the user space buffer.
 * Emits, in order: optional tun_pi header, optional virtio_net header,
 * then the frame (re-inserting a VLAN tag if one is out-of-band).
 * Returns the full on-the-wire length (may exceed what was copied when
 * the caller's buffer was short - see TUN_PKT_STRIP) or a negative errno.
 */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	struct tun_pcpu_stats *stats;
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;

	if (skb_vlan_tag_present(skb))
		vlan_hlen = VLAN_HLEN;

	if (tun->flags & IFF_VNET_HDR)
		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

	total = skb->len + vlan_hlen + vnet_hdr_sz;

	if (!(tun->flags & IFF_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;

		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
			/* Packet will be striped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
			return -EFAULT;
	}

	if (vnet_hdr_sz) {
		struct virtio_net_hdr gso;

		if (iov_iter_count(iter) < vnet_hdr_sz)
			return -EINVAL;

		if (virtio_net_hdr_from_skb(skb, &gso,
					    tun_is_little_endian(tun), true)) {
			/* Should never happen: the skb carries GSO state we
			 * cannot express in a virtio header.
			 */
			struct skb_shared_info *sinfo = skb_shinfo(skb);
			pr_err("unexpected GSO type: "
			       "0x%x, gso_size %d, hdr_len %d\n",
			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
			       tun16_to_cpu(tun, gso.hdr_len));
			print_hex_dump(KERN_ERR, "tun: ",
				       DUMP_PREFIX_NONE,
				       16, 1, skb->head,
				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	if (vlan_hlen) {
		int ret;
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;

		/* Splice the accelerated (out-of-band) VLAN tag back into
		 * the byte stream right after the MAC addresses.
		 */
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);

done:
	/* caller is in process context, */
	stats = get_cpu_ptr(tun->pcpu_stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += skb->len + vlan_hlen;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(tun->pcpu_stats);

	return total;
}
/* Pop one skb from the queue's tx ring, optionally blocking until one
 * arrives, a signal is pending, or the socket is shut down.
 * On failure returns NULL with *err set to the negative errno.
 *
 * Fixes vs. previous revision: the task state was assigned directly
 * (current->state = ...) and only once, *before* the wait loop.  After
 * the first schedule() returns, the task runs in TASK_RUNNING, so later
 * iterations never slept again - a spurious wakeup degenerated into a
 * busy spin.  Use set_current_state() (which provides the required
 * memory barrier against the waker) inside the loop, and
 * __set_current_state(TASK_RUNNING) on the way out.
 */
static struct sk_buff *tun_ring_recv(struct tun_file *tfile, int noblock,
				     int *err)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sk_buff *skb = NULL;
	int error = 0;

	skb = skb_array_consume(&tfile->tx_array);
	if (skb)
		goto out;
	if (noblock) {
		error = -EAGAIN;
		goto out;
	}

	add_wait_queue(&tfile->wq.wait, &wait);

	while (1) {
		/* Must re-arm the state on every iteration, before the
		 * ring re-check, so a concurrent producer's wakeup is
		 * not lost between the check and schedule().
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		skb = skb_array_consume(&tfile->tx_array);
		if (skb)
			break;
		if (signal_pending(current)) {
			error = -ERESTARTSYS;
			break;
		}
		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
			error = -EFAULT;
			break;
		}

		schedule();
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&tfile->wq.wait, &wait);

out:
	*err = error;
	return skb;
}
/* Copy one packet (given, or pulled from the ring) out to userspace. */
static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
			   struct iov_iter *to,
			   int noblock, struct sk_buff *skb)
{
	ssize_t copied;
	int err;

	tun_debug(KERN_INFO, tun, "tun_do_read\n");

	if (!iov_iter_count(to))
		return 0;

	if (!skb) {
		/* Caller supplied no skb - fetch one from the tx ring. */
		skb = tun_ring_recv(tfile, noblock, &err);
		if (!skb)
			return err;
	}

	copied = tun_put_user(tun, tfile, skb, to);
	if (unlikely(copied < 0))
		kfree_skb(skb);
	else
		consume_skb(skb);

	return copied;
}
/* read(2)/readv(2) on the char device: deliver one packet. */
static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	ssize_t want = iov_iter_count(to);
	ssize_t done;

	if (!tun)
		return -EBADFD;

	done = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
	/* Never report more than the caller asked for. */
	if (done > want)
		done = want;
	if (done > 0)
		iocb->ki_pos = done;

	tun_put(tun);
	return done;
}
/* priv_destructor: release tun-private state just before the netdev
 * memory itself is freed.
 */
static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	/* All disabled queues must already be gone at this point. */
	BUG_ON(!(list_empty(&tun->disabled)));
	free_percpu(tun->pcpu_stats);
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
}
/* alloc_netdev setup callback: defaults for a fresh tun/tap netdev. */
static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	/* No ownership restriction until TUNSETOWNER/TUNSETGROUP. */
	tun->owner = INVALID_UID;
	tun->group = INVALID_GID;

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = tun_free_netdev;
	/* We prefer our own queue length */
	dev->tx_queue_len = TUN_READQ_SIZE;
}
/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	/* Creation via rtnetlink is not supported, only deletion. */
	return -EINVAL;
}
/* rtnetlink glue; validate always fails so devices can only be created
 * through the character device (TUNSETIFF), but can be deleted here.
 */
static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind = DRV_NAME,
	.priv_size = sizeof(struct tun_struct),
	.setup = tun_setup,
	.validate = tun_validate,
};
/* sk->sk_write_space hook: wake poll/fasync waiters once the socket has
 * sndbuf space again.  Mirrors the SOCKWQ_ASYNC_NOSPACE handshake armed
 * in tun_chr_poll().
 */
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_file *tfile;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	/* Only wake if a poller actually armed the no-space flag. */
	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
						POLLWRNORM | POLLWRBAND);

	tfile = container_of(sk, struct tun_file, sk);
	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
/* proto_ops sendmsg (used by vhost-net): inject one packet. */
static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);
	int ret = -EBADFD;

	if (tun) {
		ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
				   m->msg_flags & MSG_DONTWAIT,
				   m->msg_flags & MSG_MORE);
		tun_put(tun);
	}
	return ret;
}
/* proto_ops recvmsg (used by vhost-net): fetch one packet, or drain the
 * socket error queue when MSG_ERRQUEUE is set.
 */
static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);
	int ret;

	if (!tun)
		return -EBADFD;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out;
	}
	if (flags & MSG_ERRQUEUE) {
		/* Zerocopy TX completions are reported via the error queue. */
		ret = sock_recv_errqueue(sock->sk, m, total_len,
					 SOL_PACKET, TUN_TX_TIMESTAMP);
		goto out;
	}
	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT,
			  m->msg_control);
	if (ret > (ssize_t)total_len) {
		/* Packet was larger than the caller's buffer. */
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;
}
/* proto_ops peek_len: length of the next queued packet, 0 if detached
 * or empty.
 */
static int tun_peek_len(struct socket *sock)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = __tun_get(tfile);
	int len = 0;

	if (tun) {
		len = skb_array_peek_len(&tfile->tx_array);
		tun_put(tun);
	}
	return len;
}
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.peek_len = tun_peek_len,
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
};
/* Minimal proto: obj_size makes sk_alloc() carve out a tun_file. */
static struct proto tun_proto = {
	.name = "tun",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tun_file),
};
/* Flags as reported to userspace (TUNGETIFF / sysfs). */
static int tun_flags(struct tun_struct *tun)
{
	int visible = TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP;

	return tun->flags & visible;
}
/* sysfs "tun_flags" attribute: device flags in hex. */
static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct net_device *ndev = to_net_dev(dev);
	struct tun_struct *tun = netdev_priv(ndev);

	return sprintf(buf, "0x%x\n", tun_flags(tun));
}
/* sysfs "owner" attribute: owning uid, or -1 when unrestricted. */
static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	if (!uid_valid(tun->owner))
		return sprintf(buf, "-1\n");

	return sprintf(buf, "%u\n",
		       from_kuid_munged(current_user_ns(), tun->owner));
}
/* sysfs "group" attribute: owning gid, or -1 when unrestricted. */
static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	if (!gid_valid(tun->group))
		return sprintf(buf, "-1\n");

	return sprintf(buf, "%u\n",
		       from_kgid_munged(current_user_ns(), tun->group));
}
/* Read-only sysfs attributes published under the net device's sysfs dir. */
static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);

static struct attribute *tun_dev_attrs[] = {
	&dev_attr_tun_flags.attr,
	&dev_attr_owner.attr,
	&dev_attr_group.attr,
	NULL
};

static const struct attribute_group tun_attr_group = {
	.attrs = tun_dev_attrs
};
/* TUNSETIFF: attach the file to an existing device by name, or create a
 * new tun/tap netdev.  Called with rtnl held (via __tun_chr_ioctl).
 * Returns 0 on success (ifr->ifr_name updated to the real name).
 */
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	/* A detached queue re-attaches via TUNSETQUEUE, not here. */
	if (tfile->detached)
		return -EINVAL;

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		/* Requested type must match the existing device's type. */
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		/* Multi-queue setting cannot be changed after creation. */
		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & IFF_MULTI_QUEUE))
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
		if (err < 0)
			return err;

		if (tun->flags & IFF_MULTI_QUEUE &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queue has already been attached, no need
			 * to initialize the device again.
			 */
			return 0;
		}
	}
	else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= IFF_TUN;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= IFF_TAP;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);

		if (!dev)
			return -ENOMEM;
		/* Resolve any "%d" template before touching the device. */
		err = dev_get_valid_name(net, dev, name);
		if (err)
			goto err_free_dev;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;
		dev->sysfs_groups[0] = &tun_attr_group;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->align = NET_SKB_PAD;
		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
		tun->rx_batched = 0;

		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
		if (!tun->pcpu_stats) {
			err = -ENOMEM;
			goto err_free_dev;
		}

		spin_lock_init(&tun->lock);

		err = security_tun_dev_alloc_security(&tun->security);
		if (err < 0)
			goto err_free_stat;

		tun_net_init(dev);
		tun_flow_init(tun);

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
				   NETIF_F_HW_VLAN_STAG_TX;
		dev->features = dev->hw_features | NETIF_F_LLTX;
		dev->vlan_features = dev->features &
				     ~(NETIF_F_HW_VLAN_CTAG_TX |
				       NETIF_F_HW_VLAN_STAG_TX);

		INIT_LIST_HEAD(&tun->disabled);
		err = tun_attach(tun, file, false);
		if (err < 0)
			goto err_free_flow;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_detach;
	}

	netif_carrier_on(tun->dev);

	tun_debug(KERN_INFO, tun, "tun_set_iff\n");

	/* Refresh the user-settable flag bits from the request. */
	tun->flags = (tun->flags & ~TUN_FEATURES) |
		(ifr->ifr_flags & TUN_FEATURES);

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

err_detach:
	tun_detach_all(dev);
	/* register_netdevice() already called tun_free_netdev() */
	goto err_free_dev;

err_free_flow:
	tun_flow_uninit(tun);
	security_tun_dev_free_security(tun->security);
err_free_stat:
	free_percpu(tun->pcpu_stats);
err_free_dev:
	free_netdev(dev);
	return err;
}
/* TUNGETIFF: report the device name and its visible flags. */
static void tun_get_iff(struct net *net, struct tun_struct *tun,
			struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	ifr->ifr_flags = tun_flags(tun);
	strcpy(ifr->ifr_name, tun->dev->name);
}
/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	/* TSO bits are only meaningful on top of checksum offload. */
	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
	tun->dev->wanted_features |= features;
	netdev_update_features(tun->dev);

	return 0;
}
/* Remove the attached socket filter from the first n queues. */
static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int q;

	for (q = 0; q < n; q++) {
		struct tun_file *tfile = rtnl_dereference(tun->tfiles[q]);
		struct sock *sk = tfile->socket.sk;

		lock_sock(sk);
		sk_detach_filter(sk);
		release_sock(sk);
	}

	tun->filter_attached = false;
}
/* Attach tun->fprog to every queue socket.  All-or-nothing: on failure,
 * queues already done are rolled back via tun_detach_filter().
 */
static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}
/* Propagate the device-wide sndbuf setting to every attached queue. */
static void tun_set_sndbuf(struct tun_struct *tun)
{
	int q;

	for (q = 0; q < tun->numqueues; q++) {
		struct tun_file *tfile = rtnl_dereference(tun->tfiles[q]);

		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}
/* TUNSETQUEUE: re-enable a previously detached queue, or detach one from
 * a multi-queue device.  Takes rtnl for the attach/detach bookkeeping.
 */
static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		/* Only a queue that was detached earlier can re-attach. */
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		/* Detach only makes sense for attached multi-queue devices. */
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else
		ret = -EINVAL;

unlock:
	rtnl_unlock();
	return ret;
}
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
unsigned long arg, int ifreq_len)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
struct ifreq ifr;
kuid_t owner;
kgid_t group;
int sndbuf;
int vnet_hdr_sz;
unsigned int ifindex;
int le;
int ret;
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
} else {
memset(&ifr, 0, sizeof(ifr));
}
if (cmd == TUNGETFEATURES) {
/* Currently this just means: "what IFF flags are valid?".
* This is needed because we never checked for invalid flags on
* TUNSETIFF.
*/
return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
(unsigned int __user*)argp);
} else if (cmd == TUNSETQUEUE)
return tun_set_queue(file, &ifr);
ret = 0;
rtnl_lock();
tun = __tun_get(tfile);
if (cmd == TUNSETIFF) {
ret = -EEXIST;
if (tun)
goto unlock;
ifr.ifr_name[IFNAMSIZ-1] = '\0';
ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
if (ret)
goto unlock;
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
goto unlock;
}
if (cmd == TUNSETIFINDEX) {
ret = -EPERM;
if (tun)
goto unlock;
ret = -EFAULT;
if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
goto unlock;
ret = 0;
tfile->ifindex = ifindex;
goto unlock;
}
ret = -EBADFD;
if (!tun)
goto unlock;
tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
ret = 0;
switch (cmd) {
case TUNGETIFF:
tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
if (tfile->detached)
ifr.ifr_flags |= IFF_DETACH_QUEUE;
if (!tfile->socket.sk->sk_filter)
ifr.ifr_flags |= IFF_NOFILTER;
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
break;
case TUNSETNOCSUM:
/* Disable/Enable checksum */
/* [unimplemented] */
tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
arg ? "disabled" : "enabled");
break;
case TUNSETPERSIST:
/* Disable/Enable persist mode. Keep an extra reference to the
* module to prevent the module being unprobed.
*/
if (arg && !(tun->flags & IFF_PERSIST)) {
tun->flags |= IFF_PERSIST;
__module_get(THIS_MODULE);
}
if (!arg && (tun->flags & IFF_PERSIST)) {
tun->flags &= ~IFF_PERSIST;
module_put(THIS_MODULE);
}
tun_debug(KERN_INFO, tun, "persist %s\n",
arg ? "enabled" : "disabled");
break;
case TUNSETOWNER:
/* Set owner of the device */
owner = make_kuid(current_user_ns(), arg);
if (!uid_valid(owner)) {
ret = -EINVAL;
break;
}
tun->owner = owner;
tun_debug(KERN_INFO, tun, "owner set to %u\n",
from_kuid(&init_user_ns, tun->owner));
break;
case TUNSETGROUP:
/* Set group of the device */
group = make_kgid(current_user_ns(), arg);
if (!gid_valid(group)) {
ret = -EINVAL;
break;
}
tun->group = group;
tun_debug(KERN_INFO, tun, "group set to %u\n",
from_kgid(&init_user_ns, tun->group));
break;
case TUNSETLINK:
/* Only allow setting the type when the interface is down */
if (tun->dev->flags & IFF_UP) {
tun_debug(KERN_INFO, tun,
"Linktype set failed because interface is up\n");
ret = -EBUSY;
} else {
tun->dev->type = (int) arg;
tun_debug(KERN_INFO, tun, "linktype set to %d\n",
tun->dev->type);
ret = 0;
}
break;
#ifdef TUN_DEBUG
case TUNSETDEBUG:
tun->debug = arg;
break;
#endif
case TUNSETOFFLOAD:
ret = set_offload(tun, arg);
break;
case TUNSETTXFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = update_filter(&tun->txflt, (void __user *)arg);
break;
case SIOCGIFHWADDR:
/* Get hw address */
memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
ifr.ifr_hwaddr.sa_family = tun->dev->type;
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
break;
case SIOCSIFHWADDR:
/* Set hw address */
tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
ifr.ifr_hwaddr.sa_data);
ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
break;
case TUNGETSNDBUF:
sndbuf = tfile->socket.sk->sk_sndbuf;
if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
ret = -EFAULT;
break;
case TUNSETSNDBUF:
if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
ret = -EFAULT;
break;
}
tun->sndbuf = sndbuf;
tun_set_sndbuf(tun);
break;
case TUNGETVNETHDRSZ:
vnet_hdr_sz = tun->vnet_hdr_sz;
if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
ret = -EFAULT;
break;
case TUNSETVNETHDRSZ:
if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
ret = -EFAULT;
break;
}
if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
ret = -EINVAL;
break;
}
tun->vnet_hdr_sz = vnet_hdr_sz;
break;
case TUNGETVNETLE:
le = !!(tun->flags & TUN_VNET_LE);
if (put_user(le, (int __user *)argp))
ret = -EFAULT;
break;
case TUNSETVNETLE:
if (get_user(le, (int __user *)argp)) {
ret = -EFAULT;
break;
}
if (le)
tun->flags |= TUN_VNET_LE;
else
tun->flags &= ~TUN_VNET_LE;
break;
case TUNGETVNETBE:
ret = tun_get_vnet_be(tun, argp);
break;
case TUNSETVNETBE:
ret = tun_set_vnet_be(tun, argp);
break;
case TUNATTACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = -EFAULT;
if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
break;
ret = tun_attach_filter(tun);
break;
case TUNDETACHFILTER:
/* Can be set only for TAPs */
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = 0;
tun_detach_filter(tun, tun->numqueues);
break;
case TUNGETFILTER:
ret = -EINVAL;
if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
break;
ret = -EFAULT;
if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
break;
ret = 0;
break;
default:
ret = -EINVAL;
break;
}
unlock:
rtnl_unlock();
if (tun)
tun_put(tun);
return ret;
}
static long tun_chr_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
}
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat ioctl entry point.  Commands whose argument is a user
 * pointer are converted with compat_ptr(); everything else is truncated
 * to the 32-bit argument width before reaching the common handler.
 */
static long tun_chr_compat_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		/* argument is a user pointer */
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		/* argument is a plain value; keep only 32 bits */
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */
/*
 * fasync() handler: register/unregister this fd for SIGIO delivery and
 * remember the state in tfile->flags so the driver knows to kill_fasync.
 */
static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret = fasync_helper(fd, file, on, &tfile->fasync);

	if (ret < 0)
		return ret;

	if (on) {
		/* direct future SIGIOs to the caller */
		__f_setown(file, task_pid(current), PIDTYPE_PID, 0);
		tfile->flags |= TUN_FASYNC;
	} else {
		tfile->flags &= ~TUN_FASYNC;
	}
	return 0;
}
/*
 * open() on /dev/net/tun: allocate the per-fd tun_file (which embeds a
 * socket) and stash it in file->private_data.  The fd is not attached
 * to any netdevice yet; that happens later via TUNSETIFF.
 */
static int tun_chr_open(struct inode *inode, struct file * file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	/* not attached to a tun_struct until TUNSETIFF */
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->wq.wait);
	RCU_INIT_POINTER(tfile->socket.wq, &tfile->wq);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);

	tfile->sk.sk_write_space = tun_sock_write_space;
	/* effectively unlimited until TUNSETSNDBUF says otherwise */
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	return 0;
}
/*
 * release() on the fd: detach the queue from its device (if attached)
 * and drop the tun_file; tun_detach(clean=true) frees everything.
 */
static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}
#ifdef CONFIG_PROC_FS
/*
 * /proc/<pid>/fdinfo/<fd> hook: report which interface (if any) this fd
 * is attached to as an "iff:" line.  ifr is zeroed first so an
 * unattached fd prints an empty name.
 */
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	/* tun_get()/tun_get_iff() want RTNL held */
	rtnl_lock();
	tun = tun_get(f);
	if (tun)
		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif
/* Character-device file operations for /dev/net/tun. */
static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read_iter  = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};

/* Misc device registration: fixed minor, device node at /dev/net/tun. */
static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};
/* ethtool interface */
static int tun_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
ethtool_link_ksettings_zero_link_mode(cmd, supported);
ethtool_link_ksettings_zero_link_mode(cmd, advertising);
cmd->base.speed = SPEED_10;
cmd->base.duplex = DUPLEX_FULL;
cmd->base.port = PORT_TP;
cmd->base.phy_address = 0;
cmd->base.autoneg = AUTONEG_DISABLE;
return 0;
}
/*
 * ethtool -i: report driver name/version, and use the bus_info field to
 * say whether this device is in TUN (L3) or TAP (L2) mode.
 */
static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);
	unsigned int mode = tun->flags & TUN_TYPE_MASK;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	if (mode == IFF_TUN)
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
	else if (mode == IFF_TAP)
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
}
/*
 * ethtool msglevel accessors; only meaningful when TUN_DEBUG is built in.
 * NOTE(review): the !TUN_DEBUG branch returns -EOPNOTSUPP from a u32
 * function, so callers see a large positive value -- long-standing
 * quirk, kept as-is.
 */
static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}

/* Set the debug mask; silently ignored when TUN_DEBUG is not built in. */
static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	tun->debug = value;
#endif
}
/* ethtool -c: expose the rx batching threshold as rx_max_coalesced_frames. */
static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	ec->rx_max_coalesced_frames = tun->rx_batched;

	return 0;
}

/* ethtool -C: set the rx batching threshold, clamped to NAPI_POLL_WEIGHT. */
static int tun_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
		tun->rx_batched = NAPI_POLL_WEIGHT;
	else
		tun->rx_batched = ec->rx_max_coalesced_frames;

	return 0;
}
/* ethtool operations supported by tun/tap devices. */
static const struct ethtool_ops tun_ethtool_ops = {
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_coalesce   = tun_get_coalesce,
	.set_coalesce   = tun_set_coalesce,
	.get_link_ksettings = tun_get_link_ksettings,
};
/*
 * Resize the per-queue skb_array of every queue -- enabled and disabled
 * alike -- to the device's (new) tx_queue_len.  Called from the netdev
 * notifier under RTNL, so tfiles[] and the disabled list are stable.
 * Returns 0 on success or a negative errno.
 */
static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct skb_array **arrays;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	/* temporary array of pointers so all rings resize in one call */
	arrays = kmalloc_array(n, sizeof(*arrays), GFP_KERNEL);
	if (!arrays)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		arrays[i] = &tfile->tx_array;
	}
	/* continue filling from index numqueues with the disabled queues */
	list_for_each_entry(tfile, &tun->disabled, next)
		arrays[i++] = &tfile->tx_array;

	ret = skb_array_resize_multiple(arrays, n,
					dev->tx_queue_len, GFP_KERNEL);

	kfree(arrays);
	return ret;
}
/*
 * Netdevice notifier: when tx_queue_len changes on one of OUR devices
 * (checked via rtnl_link_ops), resize the per-queue rings to match.
 * Events for unrelated devices are ignored.
 */
static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	/* netdev_priv() is only valid for our own devices; it is not
	 * dereferenced until after the rtnl_link_ops check below. */
	struct tun_struct *tun = netdev_priv(dev);

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call	= tun_device_event,
};
/*
 * Module init: register the rtnl link ops, the misc chardev, and the
 * netdevice notifier -- in that order.  Each error label unwinds
 * exactly the registrations that succeeded before it.
 */
static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

/* Module exit: undo everything tun_init() registered. */
static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}
/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;
	/* refuse files that are not tun fds */
	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);

/*
 * Like tun_get_socket(), but hand back the fd's skb_array (tx ring) so
 * in-kernel consumers (e.g. vhost-net) can dequeue packets directly.
 * Same caveats: caller must hold a reference on the file.
 */
struct skb_array *tun_get_skb_array(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_array;
}
EXPORT_SYMBOL_GPL(tun_get_skb_array);
/* Module entry/exit points and metadata. */
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
/* auto-load on open of the static /dev/net/tun node or by devname */
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_624_0 |
crossvul-cpp_data_good_3060_4 | /*
* fs/nfs/idmap.c
*
* UID and GID to name mapping for clients.
*
* Copyright (c) 2002 The Regents of the University of Michigan.
* All rights reserved.
*
* Marius Aamodt Eriksen <marius@umich.edu>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/types.h>
#include <linux/parser.h>
#include <linux/fs.h>
#include <linux/nfs_idmap.h>
#include <net/net_namespace.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/module.h>
#include "internal.h"
#include "netns.h"
#include "nfs4trace.h"
/* Enough room to print a 32-bit id in decimal, plus sign and NUL. */
#define NFS_UINT_MAXLEN 11

/* Credentials whose thread keyring is the shared ".id_resolver" keyring;
 * set up in nfs_idmap_init_keyring() and used for every lookup. */
static const struct cred *id_resolver_cache;
static struct key_type key_type_id_resolver_legacy;

/* State for one in-flight legacy (rpc_pipefs) upcall. */
struct idmap_legacy_upcalldata {
	struct rpc_pipe_msg pipe_msg;
	struct idmap_msg idmap_msg;
	struct key_construction	*key_cons;
	struct idmap *idmap;
};

/* Per-nfs_client idmapper instance. */
struct idmap {
	struct rpc_pipe_dir_object idmap_pdo;
	struct rpc_pipe		*idmap_pipe;
	/* at most one legacy upcall pending; NULL when idle */
	struct idmap_legacy_upcalldata *idmap_upcall_data;
	/* serialises legacy upcalls (see nfs_idmap_request_key) */
	struct mutex		idmap_mutex;
};
/**
 * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
 * @fattr: fully initialised struct nfs_fattr
 * @owner_name: owner name string cache
 * @group_name: group name string cache
 */
void nfs_fattr_init_names(struct nfs_fattr *fattr,
		struct nfs4_string *owner_name,
		struct nfs4_string *group_name)
{
	fattr->owner_name = owner_name;
	fattr->group_name = group_name;
}

/* Drop the cached owner string and clear its validity bit. */
static void nfs_fattr_free_owner_name(struct nfs_fattr *fattr)
{
	fattr->valid &= ~NFS_ATTR_FATTR_OWNER_NAME;
	kfree(fattr->owner_name->data);
}

/* Drop the cached group string and clear its validity bit. */
static void nfs_fattr_free_group_name(struct nfs_fattr *fattr)
{
	fattr->valid &= ~NFS_ATTR_FATTR_GROUP_NAME;
	kfree(fattr->group_name->data);
}

/*
 * Map the cached owner string to a uid.  Returns true when a string was
 * present (so the caller may free it), regardless of whether the
 * mapping itself succeeded; on success the OWNER bit is also set.
 */
static bool nfs_fattr_map_owner_name(struct nfs_server *server, struct nfs_fattr *fattr)
{
	struct nfs4_string *owner = fattr->owner_name;
	kuid_t uid;

	if (!(fattr->valid & NFS_ATTR_FATTR_OWNER_NAME))
		return false;
	if (nfs_map_name_to_uid(server, owner->data, owner->len, &uid) == 0) {
		fattr->uid = uid;
		fattr->valid |= NFS_ATTR_FATTR_OWNER;
	}
	return true;
}

/* Group analogue of nfs_fattr_map_owner_name(). */
static bool nfs_fattr_map_group_name(struct nfs_server *server, struct nfs_fattr *fattr)
{
	struct nfs4_string *group = fattr->group_name;
	kgid_t gid;

	if (!(fattr->valid & NFS_ATTR_FATTR_GROUP_NAME))
		return false;
	if (nfs_map_group_to_gid(server, group->data, group->len, &gid) == 0) {
		fattr->gid = gid;
		fattr->valid |= NFS_ATTR_FATTR_GROUP;
	}
	return true;
}

/**
 * nfs_fattr_free_names - free up the NFSv4 owner and group strings
 * @fattr: a fully initialised nfs_fattr structure
 */
void nfs_fattr_free_names(struct nfs_fattr *fattr)
{
	if (fattr->valid & NFS_ATTR_FATTR_OWNER_NAME)
		nfs_fattr_free_owner_name(fattr);
	if (fattr->valid & NFS_ATTR_FATTR_GROUP_NAME)
		nfs_fattr_free_group_name(fattr);
}

/**
 * nfs_fattr_map_and_free_names - map owner/group strings into uid/gid and free
 * @server: pointer to the filesystem nfs_server structure
 * @fattr: a fully initialised nfs_fattr structure
 *
 * This helper maps the cached NFSv4 owner/group strings in fattr into
 * their numeric uid/gid equivalents, and then frees the cached strings.
 */
void nfs_fattr_map_and_free_names(struct nfs_server *server, struct nfs_fattr *fattr)
{
	if (nfs_fattr_map_owner_name(server, fattr))
		nfs_fattr_free_owner_name(fattr);
	if (nfs_fattr_map_group_name(server, fattr))
		nfs_fattr_free_group_name(fattr);
}
/*
 * Try to interpret @name (not NUL-terminated, @namelen bytes) as a plain
 * numeric id.  Returns 1 and stores the value in *res on success, 0 if
 * the string is not purely numeric (contains '@', is too long, or fails
 * to parse).
 */
static int nfs_map_string_to_numeric(const char *name, size_t namelen, __u32 *res)
{
	char buf[16];
	unsigned long val;

	/* "user@domain" strings and over-long inputs are never numeric */
	if (namelen >= sizeof(buf) || memchr(name, '@', namelen) != NULL)
		return 0;

	/* make a NUL-terminated copy for kstrtoul */
	memcpy(buf, name, namelen);
	buf[namelen] = '\0';

	if (kstrtoul(buf, 0, &val) != 0)
		return 0;

	*res = val;
	return 1;
}
/* Print @id in decimal into @buf; returns the snprintf() length. */
static int nfs_map_numeric_to_string(__u32 id, char *buf, size_t buflen)
{
	return snprintf(buf, buflen, "%u", id);
}
/*
 * Primary id-resolver key type: resolved by the userspace
 * /sbin/request-key mechanism; payload handling is the generic
 * "user" key behaviour.
 */
static struct key_type key_type_id_resolver = {
	.name		= "id_resolver",
	.preparse	= user_preparse,
	.free_preparse	= user_free_preparse,
	.instantiate	= generic_key_instantiate,
	.revoke		= user_revoke,
	.destroy	= user_destroy,
	.describe	= user_describe,
	.read		= user_read,
};
/*
 * Create the ".id_resolver" keyring plus a dedicated set of kernel
 * credentials whose thread keyring points at it, then register both
 * id-resolver key types.  All idmap lookups later run under these
 * credentials so results land in this one shared keyring.
 */
static int nfs_idmap_init_keyring(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret = 0;

	printk(KERN_NOTICE "NFS: Registering the %s key type\n",
		key_type_id_resolver.name);

	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;

	keyring = keyring_alloc(".id_resolver",
				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
				KEY_USR_VIEW | KEY_USR_READ,
				KEY_ALLOC_NOT_IN_QUOTA, NULL);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}

	ret = register_key_type(&key_type_id_resolver);
	if (ret < 0)
		goto failed_put_key;

	ret = register_key_type(&key_type_id_resolver_legacy);
	if (ret < 0)
		goto failed_reg_legacy;

	/* allow root to flush the idmapper cache via keyctl clear */
	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	id_resolver_cache = cred;
	return 0;

failed_reg_legacy:
	unregister_key_type(&key_type_id_resolver);
failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}

/* Undo nfs_idmap_init_keyring(): revoke the keyring, drop key types and cred. */
static void nfs_idmap_quit_keyring(void)
{
	key_revoke(id_resolver_cache->thread_keyring);
	unregister_key_type(&key_type_id_resolver);
	unregister_key_type(&key_type_id_resolver_legacy);
	put_cred(id_resolver_cache);
}
/*
 * Assemble the "<type>:<name>" description to pass to request_key().
 * This function will allocate a new string and update desc to point
 * at it. The caller is responsible for freeing desc.
 *
 * Returns the length of desc (including the trailing NUL) on success,
 * or -ENOMEM on allocation failure.  (The old "0 on error" comment was
 * stale: this never returns 0.)
 */
static ssize_t nfs_idmap_get_desc(const char *name, size_t namelen,
				  const char *type, size_t typelen, char **desc)
{
	char *cp;
	size_t desclen = typelen + namelen + 2;

	*desc = kmalloc(desclen, GFP_KERNEL);
	if (!*desc)
		return -ENOMEM;

	cp = *desc;
	memcpy(cp, type, typelen);
	cp += typelen;
	*cp++ = ':';

	memcpy(cp, name, namelen);
	cp += namelen;
	*cp = '\0';
	return desclen;
}

/*
 * Resolve <type>:<name> via the regular id_resolver key type first; if
 * that fails, retry through the legacy rpc_pipefs upcall.  idmap_mutex
 * serialises legacy upcalls because only one may be pending per idmap.
 */
static struct key *nfs_idmap_request_key(const char *name, size_t namelen,
					 const char *type, struct idmap *idmap)
{
	char *desc;
	struct key *rkey;
	ssize_t ret;

	ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc);
	if (ret <= 0)
		return ERR_PTR(ret);

	rkey = request_key(&key_type_id_resolver, desc, "");
	if (IS_ERR(rkey)) {
		mutex_lock(&idmap->idmap_mutex);
		rkey = request_key_with_auxdata(&key_type_id_resolver_legacy,
						desc, "", 0, idmap);
		mutex_unlock(&idmap->idmap_mutex);
	}
	if (!IS_ERR(rkey))
		/* let root invalidate stale entries with keyctl invalidate */
		set_bit(KEY_FLAG_ROOT_CAN_INVAL, &rkey->flags);

	kfree(desc);
	return rkey;
}
/*
 * Look up <type>:<name> and copy the key payload (at most @data_size
 * bytes) into @data.  Runs with the id_resolver credentials so the
 * lookup uses the shared ".id_resolver" keyring.  Returns the payload
 * length on success or a negative errno.
 */
static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
				 const char *type, void *data,
				 size_t data_size, struct idmap *idmap)
{
	const struct cred *saved_cred;
	struct key *rkey;
	struct user_key_payload *payload;
	ssize_t ret;

	saved_cred = override_creds(id_resolver_cache);
	rkey = nfs_idmap_request_key(name, namelen, type, idmap);
	revert_creds(saved_cred);

	if (IS_ERR(rkey)) {
		ret = PTR_ERR(rkey);
		goto out;
	}

	/* RCU protects the payload pointer read below */
	rcu_read_lock();
	rkey->perm |= KEY_USR_VIEW;

	ret = key_validate(rkey);
	if (ret < 0)
		goto out_up;

	payload = rcu_dereference(rkey->payload.rcudata);
	if (IS_ERR_OR_NULL(payload)) {
		ret = PTR_ERR(payload);
		goto out_up;
	}

	ret = payload->datalen;
	/* reject empty or oversized payloads; ret > 0 makes the
	 * signed/unsigned comparison with data_size safe */
	if (ret > 0 && ret <= data_size)
		memcpy(data, payload->data, ret);
	else
		ret = -EINVAL;

out_up:
	rcu_read_unlock();
	key_put(rkey);
out:
	return ret;
}
/* ID -> Name: stringify @id and resolve it to a name in @buf.
 * Returns the name length or -EINVAL. */
static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf,
				     size_t buflen, struct idmap *idmap)
{
	char id_str[NFS_UINT_MAXLEN];
	int id_len;
	ssize_t ret;

	id_len = snprintf(id_str, sizeof(id_str), "%u", id);
	ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap);
	if (ret < 0)
		return -EINVAL;
	return ret;
}

/* Name -> ID: resolve @name to a decimal string and parse it into *id.
 * The instantiated payload includes its NUL (see the NAMETOID case of
 * nfs_idmap_read_and_verify_message), so kstrtol gets a terminated
 * string.  Returns 0 or a negative errno. */
static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *type,
			       __u32 *id, struct idmap *idmap)
{
	char id_str[NFS_UINT_MAXLEN];
	long id_long;
	ssize_t data_size;
	int ret = 0;

	data_size = nfs_idmap_get_key(name, namelen, type, id_str, NFS_UINT_MAXLEN, idmap);
	if (data_size <= 0) {
		ret = -EINVAL;
	} else {
		ret = kstrtol(id_str, 10, &id_long);
		*id = (__u32)id_long;
	}
	return ret;
}
/* idmap classic begins here */

/* Token ids for the key-description grammar parsed below. */
enum {
	Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err
};

/* Key descriptions are "uid:<name>", "gid:<name>", "user:<id>", "group:<id>". */
static const match_table_t nfs_idmap_tokens = {
	{ Opt_find_uid, "uid:%s" },
	{ Opt_find_gid, "gid:%s" },
	{ Opt_find_user, "user:%s" },
	{ Opt_find_group, "group:%s" },
	{ Opt_find_err, NULL }
};

static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *);
static ssize_t idmap_pipe_downcall(struct file *, const char __user *,
				   size_t);
static void idmap_release_pipe(struct inode *);
static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *);

/* rpc_pipefs callbacks for the legacy "idmap" pipe. */
static const struct rpc_pipe_ops idmap_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= idmap_pipe_downcall,
	.release_pipe	= idmap_release_pipe,
	.destroy_msg	= idmap_pipe_destroy_msg,
};

/* Fallback key type resolved via the rpc_pipefs upcall rather than
 * /sbin/request-key; see nfs_idmap_request_key(). */
static struct key_type key_type_id_resolver_legacy = {
	.name		= "id_legacy",
	.preparse	= user_preparse,
	.free_preparse	= user_free_preparse,
	.instantiate	= generic_key_instantiate,
	.revoke		= user_revoke,
	.destroy	= user_destroy,
	.describe	= user_describe,
	.read		= user_read,
	.request_key	= nfs_idmap_legacy_upcall,
};
/* Remove the "idmap" pipe dentry when the rpc_pipefs directory goes away. */
static void nfs_idmap_pipe_destroy(struct dentry *dir,
		struct rpc_pipe_dir_object *pdo)
{
	struct idmap *idmap = pdo->pdo_data;
	struct rpc_pipe *pipe = idmap->idmap_pipe;

	if (pipe->dentry) {
		rpc_unlink(pipe->dentry);
		pipe->dentry = NULL;
	}
}

/* Create the "idmap" pipe dentry under the client's rpc_pipefs directory. */
static int nfs_idmap_pipe_create(struct dentry *dir,
		struct rpc_pipe_dir_object *pdo)
{
	struct idmap *idmap = pdo->pdo_data;
	struct rpc_pipe *pipe = idmap->idmap_pipe;
	struct dentry *dentry;

	dentry = rpc_mkpipe_dentry(dir, "idmap", idmap, pipe);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);
	pipe->dentry = dentry;
	return 0;
}

static const struct rpc_pipe_dir_object_ops nfs_idmap_pipe_dir_object_ops = {
	.create = nfs_idmap_pipe_create,
	.destroy = nfs_idmap_pipe_destroy,
};
/*
 * Allocate and wire up the per-client idmapper: the rpc_pipe used for
 * legacy upcalls plus the pipefs dir object that creates/destroys its
 * dentry.  On success the idmap is stored in clp->cl_idmap.
 */
int
nfs_idmap_new(struct nfs_client *clp)
{
	struct idmap *idmap;
	struct rpc_pipe *pipe;
	int error;

	idmap = kzalloc(sizeof(*idmap), GFP_KERNEL);
	if (idmap == NULL)
		return -ENOMEM;

	rpc_init_pipe_dir_object(&idmap->idmap_pdo,
			&nfs_idmap_pipe_dir_object_ops,
			idmap);

	pipe = rpc_mkpipe_data(&idmap_upcall_ops, 0);
	if (IS_ERR(pipe)) {
		error = PTR_ERR(pipe);
		goto err;
	}
	idmap->idmap_pipe = pipe;
	mutex_init(&idmap->idmap_mutex);

	error = rpc_add_pipe_dir_object(clp->cl_net,
			&clp->cl_rpcclient->cl_pipedir_objects,
			&idmap->idmap_pdo);
	if (error)
		goto err_destroy_pipe;

	clp->cl_idmap = idmap;
	return 0;
err_destroy_pipe:
	rpc_destroy_pipe_data(idmap->idmap_pipe);
err:
	kfree(idmap);
	return error;
}

/* Tear down what nfs_idmap_new() set up; safe to call when no idmap exists. */
void
nfs_idmap_delete(struct nfs_client *clp)
{
	struct idmap *idmap = clp->cl_idmap;

	if (!idmap)
		return;
	clp->cl_idmap = NULL;
	rpc_remove_pipe_dir_object(clp->cl_net,
			&clp->cl_rpcclient->cl_pipedir_objects,
			&idmap->idmap_pdo);
	rpc_destroy_pipe_data(idmap->idmap_pipe);
	kfree(idmap);
}
/* Module-init hook: everything the idmapper needs globally is the
 * keyring and key types set up by nfs_idmap_init_keyring(). */
int nfs_idmap_init(void)
{
	return nfs_idmap_init_keyring();
}

/* Module-exit hook: tear down the keyring state set up above. */
void nfs_idmap_quit(void)
{
	nfs_idmap_quit_keyring();
}
/*
 * Parse a key description ("uid:%s", "gid:%s", "user:%s" or "group:%s")
 * into an idmap_msg and point the rpc_pipe_msg at it.  Returns the
 * match_* result (>= 0 on success) or -EINVAL for an unknown prefix.
 */
static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap,
				     struct idmap_msg *im,
				     struct rpc_pipe_msg *msg)
{
	substring_t substr;
	int token, ret;

	/* default; overwritten for the uid/user cases below */
	im->im_type = IDMAP_TYPE_GROUP;
	token = match_token(desc, nfs_idmap_tokens, &substr);

	switch (token) {
	case Opt_find_uid:
		im->im_type = IDMAP_TYPE_USER;
		/* fall through - uid and gid are both name-to-id lookups */
	case Opt_find_gid:
		im->im_conv = IDMAP_CONV_NAMETOID;
		ret = match_strlcpy(im->im_name, &substr, IDMAP_NAMESZ);
		break;

	case Opt_find_user:
		im->im_type = IDMAP_TYPE_USER;
		/* fall through - user and group are both id-to-name lookups */
	case Opt_find_group:
		im->im_conv = IDMAP_CONV_IDTONAME;
		ret = match_int(&substr, &im->im_id);
		break;

	default:
		ret = -EINVAL;
		goto out;
	}

	msg->data = im;
	msg->len = sizeof(struct idmap_msg);

out:
	return ret;
}
/*
 * Record @data as the single pending upcall.  Returns false if another
 * upcall is already in flight -- the idmap_mutex taken in
 * nfs_idmap_request_key should make that impossible, hence the WARN.
 */
static bool
nfs_idmap_prepare_pipe_upcall(struct idmap *idmap,
			      struct idmap_legacy_upcalldata *data)
{
	if (idmap->idmap_upcall_data != NULL) {
		WARN_ON_ONCE(1);
		return false;
	}
	idmap->idmap_upcall_data = data;
	return true;
}

/*
 * Finish the pending upcall with result @ret: free the upcall data,
 * clear the pending slot and wake the key requester.  Caller must have
 * established that idmap_upcall_data is non-NULL.
 */
static void
nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret)
{
	struct key_construction *cons = idmap->idmap_upcall_data->key_cons;

	kfree(idmap->idmap_upcall_data);
	idmap->idmap_upcall_data = NULL;
	complete_request_key(cons, ret);
}

/* As above, but a no-op when no upcall is pending. */
static void
nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret)
{
	if (idmap->idmap_upcall_data != NULL)
		nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
}
/*
 * request_key callback for the "id_legacy" key type: build an idmap_msg
 * from the key description and queue it on the rpc_pipefs "idmap" pipe
 * for the userspace daemon.  The daemon's reply arrives later via
 * idmap_pipe_downcall(), which completes the key construction.
 */
static int nfs_idmap_legacy_upcall(struct key_construction *cons,
				   const char *op,
				   void *aux)
{
	struct idmap_legacy_upcalldata *data;
	struct rpc_pipe_msg *msg;
	struct idmap_msg *im;
	struct idmap *idmap = (struct idmap *)aux;
	struct key *key = cons->key;
	int ret = -ENOMEM;

	/* msg and im are freed in idmap_pipe_destroy_msg */
	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		goto out1;

	msg = &data->pipe_msg;
	im = &data->idmap_msg;
	data->idmap = idmap;
	data->key_cons = cons;

	ret = nfs_idmap_prepare_message(key->description, idmap, im, msg);
	if (ret < 0)
		goto out2;

	ret = -EAGAIN;
	if (!nfs_idmap_prepare_pipe_upcall(idmap, data))
		goto out2;

	ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
	if (ret < 0)
		nfs_idmap_abort_pipe_upcall(idmap, ret);

	return ret;
out2:
	kfree(data);
out1:
	complete_request_key(cons, ret);
	return ret;
}
/* Instantiate @key with @data and link it into the shared id_resolver
 * keyring, authorised by @authkey. */
static int nfs_idmap_instantiate(struct key *key, struct key *authkey, char *data, size_t datalen)
{
	return key_instantiate_and_link(key, data, datalen,
					id_resolver_cache->thread_keyring,
					authkey);
}

/*
 * Check that the daemon's reply @im matches the original @upcall
 * (same type/conversion and same name or id), then instantiate the key
 * with the answer.  Returns the instantiate result, -ENOKEY on a
 * mismatched reply, or -EINVAL for an unknown conversion.
 */
static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
		struct idmap_msg *upcall,
		struct key *key, struct key *authkey)
{
	char id_str[NFS_UINT_MAXLEN];
	size_t len;
	int ret = -ENOKEY;

	/* ret = -ENOKEY */
	if (upcall->im_type != im->im_type || upcall->im_conv != im->im_conv)
		goto out;
	switch (im->im_conv) {
	case IDMAP_CONV_NAMETOID:
		if (strcmp(upcall->im_name, im->im_name) != 0)
			break;
		/* Note: here we store the NUL terminator too */
		len = sprintf(id_str, "%d", im->im_id) + 1;
		ret = nfs_idmap_instantiate(key, authkey, id_str, len);
		break;
	case IDMAP_CONV_IDTONAME:
		if (upcall->im_id != im->im_id)
			break;
		len = strlen(im->im_name);
		ret = nfs_idmap_instantiate(key, authkey, im->im_name, len);
		break;
	default:
		ret = -EINVAL;
	}
out:
	return ret;
}
/*
 * Downcall from the userspace idmap daemon: validate the reply against
 * the pending upcall and complete the key construction.  The initial
 * NULL check guards against a write on the pipe when no upcall is
 * pending (otherwise idmap_upcall_data would be dereferenced as NULL).
 */
static ssize_t
idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
{
	struct rpc_inode *rpci = RPC_I(file_inode(filp));
	struct idmap *idmap = (struct idmap *)rpci->private;
	struct key_construction *cons;
	struct idmap_msg im;
	size_t namelen_in;
	int ret = -ENOKEY;

	/* If instantiation is successful, anyone waiting for key construction
	 * will have been woken up and someone else may now have used
	 * idmap_key_cons - so after this point we may no longer touch it.
	 */
	if (idmap->idmap_upcall_data == NULL)
		goto out_noupcall;
	cons = idmap->idmap_upcall_data->key_cons;

	if (mlen != sizeof(im)) {
		ret = -ENOSPC;
		goto out;
	}

	if (copy_from_user(&im, src, mlen) != 0) {
		ret = -EFAULT;
		goto out;
	}

	if (!(im.im_status & IDMAP_STATUS_SUCCESS)) {
		ret = -ENOKEY;
		goto out;
	}

	/* name must be non-empty and NUL-terminated within IDMAP_NAMESZ */
	namelen_in = strnlen(im.im_name, IDMAP_NAMESZ);
	if (namelen_in == 0 || namelen_in == IDMAP_NAMESZ) {
		ret = -EINVAL;
		goto out;
	}

	ret = nfs_idmap_read_and_verify_message(&im,
			&idmap->idmap_upcall_data->idmap_msg,
			cons->key, cons->authkey);
	if (ret >= 0) {
		key_set_timeout(cons->key, nfs_idmap_cache_timeout);
		ret = mlen;
	}

out:
	nfs_idmap_complete_pipe_upcall_locked(idmap, ret);
out_noupcall:
	return ret;
}
/*
 * rpc_pipefs destroy_msg callback: if the queued message is being torn
 * down with an error (daemon died, pipe flushed), abort the pending
 * upcall so the key requester is not left waiting.
 */
static void
idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg)
{
	struct idmap_legacy_upcalldata *data = container_of(msg,
			struct idmap_legacy_upcalldata,
			pipe_msg);
	struct idmap *idmap = data->idmap;

	if (msg->errno)
		nfs_idmap_abort_pipe_upcall(idmap, msg->errno);
}

/* Pipe closed by the daemon: fail any pending upcall with -EPIPE. */
static void
idmap_release_pipe(struct inode *inode)
{
	struct rpc_inode *rpci = RPC_I(inode);
	struct idmap *idmap = (struct idmap *)rpci->private;

	nfs_idmap_abort_pipe_upcall(idmap, -EPIPE);
}
/*
 * Map an NFSv4 owner string to a kuid.  Purely numeric strings are
 * converted directly; anything else goes through the idmapper.  Returns
 * 0 on success, -ERANGE if the id has no mapping in init_user_ns, or a
 * negative errno from the lookup.
 */
int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, kuid_t *uid)
{
	struct idmap *idmap = server->nfs_client->cl_idmap;
	__u32 id = -1;
	int ret = 0;

	if (!nfs_map_string_to_numeric(name, namelen, &id))
		ret = nfs_idmap_lookup_id(name, namelen, "uid", &id, idmap);
	if (ret == 0) {
		*uid = make_kuid(&init_user_ns, id);
		if (!uid_valid(*uid))
			ret = -ERANGE;
	}
	trace_nfs4_map_name_to_uid(name, namelen, id, ret);
	return ret;
}

/* Group analogue of nfs_map_name_to_uid(). */
int nfs_map_group_to_gid(const struct nfs_server *server, const char *name, size_t namelen, kgid_t *gid)
{
	struct idmap *idmap = server->nfs_client->cl_idmap;
	__u32 id = -1;
	int ret = 0;

	if (!nfs_map_string_to_numeric(name, namelen, &id))
		ret = nfs_idmap_lookup_id(name, namelen, "gid", &id, idmap);
	if (ret == 0) {
		*gid = make_kgid(&init_user_ns, id);
		if (!gid_valid(*gid))
			ret = -ERANGE;
	}
	trace_nfs4_map_group_to_gid(name, namelen, id, ret);
	return ret;
}

/*
 * Map a kuid to an NFSv4 owner string in @buf.  Falls back to printing
 * the numeric id when the idmapper fails or when numeric ids were
 * negotiated (NFS_CAP_UIDGID_NOMAP).  Returns the string length.
 */
int nfs_map_uid_to_name(const struct nfs_server *server, kuid_t uid, char *buf, size_t buflen)
{
	struct idmap *idmap = server->nfs_client->cl_idmap;
	int ret = -EINVAL;
	__u32 id;

	id = from_kuid(&init_user_ns, uid);
	if (!(server->caps & NFS_CAP_UIDGID_NOMAP))
		ret = nfs_idmap_lookup_name(id, "user", buf, buflen, idmap);
	if (ret < 0)
		ret = nfs_map_numeric_to_string(id, buf, buflen);
	trace_nfs4_map_uid_to_name(buf, ret, id, ret);
	return ret;
}

/* Group analogue of nfs_map_uid_to_name(). */
int nfs_map_gid_to_group(const struct nfs_server *server, kgid_t gid, char *buf, size_t buflen)
{
	struct idmap *idmap = server->nfs_client->cl_idmap;
	int ret = -EINVAL;
	__u32 id;

	id = from_kgid(&init_user_ns, gid);
	if (!(server->caps & NFS_CAP_UIDGID_NOMAP))
		ret = nfs_idmap_lookup_name(id, "group", buf, buflen, idmap);
	if (ret < 0)
		ret = nfs_map_numeric_to_string(id, buf, buflen);
	trace_nfs4_map_gid_to_group(buf, ret, id, ret);
	return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_3060_4 |
crossvul-cpp_data_good_1836_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO N N SSSSS TTTTT IIIII TTTTT U U TTTTT EEEEE %
% C O O NN N SS T I T U U T E %
% C O O N N N ESSS T I T U U T EEE %
% C O O N NN SS T I T U U T E %
% CCCC OOO N N SSSSS T IIIII T UUU T EEEEE %
% %
% %
% MagickCore Methods to Consitute an Image %
% %
% Software Design %
% Cristy %
% October 1998 %
% %
% %
% Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/client.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/constitute-private.h"
#include "MagickCore/delegate.h"
#include "MagickCore/geometry.h"
#include "MagickCore/identify.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/statistic.h"
#include "MagickCore/stream.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n s t i t u t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConstituteImage() returns an image from the pixel data you supply.
% The pixel data must be in scanline order top-to-bottom. The data can be
% char, short int, int, float, or double. Float and double require the
% pixels to be normalized [0..1], otherwise [0..QuantumRange]. For example, to
% create a 640x480 image from unsigned red-green-blue character data, use:
%
% image = ConstituteImage(640,480,"RGB",CharPixel,pixels,&exception);
%
% The format of the ConstituteImage method is:
%
% Image *ConstituteImage(const size_t columns,const size_t rows,
% const char *map,const StorageType storage,const void *pixels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o columns: width in pixels of the image.
%
% o rows: height in pixels of the image.
%
% o map: This string reflects the expected ordering of the pixel array.
% It can be any combination or order of R = red, G = green, B = blue,
% A = alpha (0 is transparent), O = opacity (0 is opaque), C = cyan,
% Y = yellow, M = magenta, K = black, I = intensity (for grayscale),
% P = pad.
%
% o storage: Define the data type of the pixels. Float and double types are
% expected to be normalized [0..1] otherwise [0..QuantumRange]. Choose
% from these types: CharPixel, DoublePixel, FloatPixel, IntegerPixel,
% LongPixel, QuantumPixel, or ShortPixel.
%
% o pixels: This array of values contain the pixel components as defined by
% map and type. You must preallocate this array where the expected
% length varies depending on the values of width, height, map, and type.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConstituteImage(const size_t columns,const size_t rows,
  const char *map,const StorageType storage,const void *pixels,
  ExceptionInfo *exception)
{
  Image
    *image;

  MagickBooleanType
    status;

  /*
    Allocate image structure.
  */
  assert(map != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",map);
  assert(pixels != (void *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=AcquireImage((ImageInfo *) NULL,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if ((columns == 0) || (rows == 0))
    {
      /*
        Report the error while image->filename is still valid, then release
        the image; a bare ThrowImageException here would leak it.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NonZeroWidthAndHeightRequired","`%s'",image->filename);
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  image->columns=columns;
  image->rows=rows;
  /*
    Initialize the canvas, then import the caller-supplied pixels in
    scanline order as described by map/storage; destroy on failure so the
    caller only ever frees a fully-constituted image or gets NULL.
  */
  (void) SetImageBackgroundColor(image,exception);
  status=ImportImagePixels(image,0,0,columns,rows,map,storage,pixels,exception);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImage() returns all the properties of an image or image sequence
% except for the pixels. It is much faster and consumes far less memory
% than ReadImage(). On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the PingImage method is:
%
% Image *PingImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Ping the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  PingStream() is the stream handler installed by PingImage(): it discards
  the pixel data handed to it and reports the entire scanline (columns) as
  consumed, so an image can be "read" while retaining only its properties.
*/
static size_t PingStream(const Image *magick_unused(image),
  const void *magick_unused(pixels),const size_t columns)
{
  return(columns);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
MagickExport Image *PingImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *clone_info;

  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Stream the file through PingStream(), which drops the pixel data, so
    only the image properties are gathered.
  */
  clone_info=CloneImageInfo(image_info);
  clone_info->ping=MagickTrue;
  image=ReadStream(clone_info,&PingStream,exception);
  if (image != (Image *) NULL)
    {
      /*
        Discard the time spent pinging; describe the image when verbose.
      */
      ResetTimer(&image->timer);
      if (clone_info->verbose != MagickFalse)
        (void) IdentifyImage(image,stdout,MagickFalse,exception);
    }
  clone_info=DestroyImageInfo(clone_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P i n g I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PingImages() pings one or more images and returns them as an image list.
%
% The format of the PingImage method is:
%
% Image *PingImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PingImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    expanded_filename[MagickPathExtent];

  Image
    *image;

  /*
    Ping an image or an image sequence given its filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  (void) SetImageOption(image_info,"filename",filename);
  (void) CopyMagickString(image_info->filename,filename,MagickPathExtent);
  (void) InterpretImageFilename(image_info,(Image *) NULL,image_info->filename,
    (int) image_info->scene,expanded_filename,exception);
  if (LocaleCompare(expanded_filename,image_info->filename) == 0)
    return(PingImage(image_info,exception));
  {
    ExceptionInfo
      *sans;

    Image
      *scene_list;

    ImageInfo
      *clone_info;

    ssize_t
      last_scene,
      scene;

    /*
      The filename embeds a scene template (e.g. image-%d.png[1-5]): ping
      each scene in the requested range and collect the results in a list.
    */
    clone_info=CloneImageInfo(image_info);
    sans=AcquireExceptionInfo();
    (void) SetImageInfo(clone_info,0,sans);
    sans=DestroyExceptionInfo(sans);
    if (clone_info->number_scenes == 0)
      {
        clone_info=DestroyImageInfo(clone_info);
        return(PingImage(image_info,exception));
      }
    (void) CopyMagickString(expanded_filename,clone_info->filename,
      MagickPathExtent);
    scene_list=NewImageList();
    last_scene=(ssize_t) (clone_info->scene+clone_info->number_scenes);
    for (scene=(ssize_t) clone_info->scene; scene < last_scene; scene++)
    {
      (void) InterpretImageFilename(image_info,(Image *) NULL,
        expanded_filename,(int) scene,clone_info->filename,exception);
      image=PingImage(clone_info,exception);
      if (image != (Image *) NULL)
        AppendImageToList(&scene_list,image);
    }
    clone_info=DestroyImageInfo(clone_info);
    return(scene_list);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImage() reads an image or image sequence from a file or file handle.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadImage method is:
%
% Image *ReadImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Read the image defined by the file or filename members of
% this structure.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadImage(const ImageInfo *image_info,
  ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent],
    magick[MagickPathExtent],
    magick_filename[MagickPathExtent];

  const char
    *value;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  GeometryInfo
    geometry_info;

  Image
    *image,
    *next;

  ImageInfo
    *read_info;

  MagickStatusType
    flags;

  PolicyDomain
    domain;

  PolicyRights
    rights;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image_info->filename != (char *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  read_info=CloneImageInfo(image_info);
  (void) CopyMagickString(magick_filename,read_info->filename,MagickPathExtent);
  (void) SetImageInfo(read_info,0,exception);
  (void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
  (void) CopyMagickString(magick,read_info->magick,MagickPathExtent);
  /*
    Verify the security policy authorizes reading with this coder.
  */
  domain=CoderPolicyDomain;
  rights=ReadPolicyRights;
  if (IsRightsAuthorized(domain,rights,read_info->magick) == MagickFalse)
    {
      errno=EPERM;
      (void) ThrowMagickException(exception,GetMagickModule(),PolicyError,
        "NotAuthorized","`%s'",read_info->filename);
      read_info=DestroyImageInfo(read_info);
      return((Image *) NULL);
    }
  /*
    Call appropriate image reader based on image type.
  */
  sans_exception=AcquireExceptionInfo();
  magick_info=GetMagickInfo(read_info->magick,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        read_info->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* detect host byte order for raw coders with no fixed endian */
            lsb_first=1;
            read_info->endian=(*(char *) &lsb_first) == 1 ? LSBEndian :
              MSBEndian;
          }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickSeekableStream(magick_info) != MagickFalse))
    {
      MagickBooleanType
        status;

      image=AcquireImage(read_info,exception);
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
      if (status == MagickFalse)
        {
          read_info=DestroyImageInfo(read_info);
          image=DestroyImage(image);
          return((Image *) NULL);
        }
      if (IsBlobSeekable(image) == MagickFalse)
        {
          /*
            Coder requires a seekable stream: spool to a temporary file.
          */
          *read_info->filename='\0';
          status=ImageToFile(image,read_info->filename,exception);
          if (status == MagickFalse)
            {
              (void) CloseBlob(image);
              read_info=DestroyImageInfo(read_info);
              image=DestroyImage(image);
              return((Image *) NULL);
            }
          read_info->temporary=MagickTrue;
        }
      (void) CloseBlob(image);
      image=DestroyImage(image);
    }
  image=NewImageList();
  if ((magick_info == (const MagickInfo *) NULL) ||
      (GetImageDecoder(magick_info) == (DecodeImageHandler *) NULL))
    {
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) SetImageInfo(read_info,0,exception);
          (void) CopyMagickString(read_info->filename,filename,
            MagickPathExtent);
          magick_info=GetMagickInfo(read_info->magick,exception);
        }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetImageDecoder(magick_info) != (DecodeImageHandler *) NULL))
    {
      /*
        Built-in decoder; serialize access when the coder is not
        thread-safe.
      */
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      image=GetImageDecoder(magick_info)(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo(read_info->magick,(char *) NULL,exception);
      if (delegate_info == (const DelegateInfo *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
            read_info->magick);
          if (read_info->temporary != MagickFalse)
            (void) RelinquishUniqueFileResource(read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      /*
        Let our decoding delegate process the image, then read its output
        with the matching built-in decoder.
      */
      image=AcquireImage(read_info,exception);
      if (image == (Image *) NULL)
        {
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      (void) CopyMagickString(image->filename,read_info->filename,
        MagickPathExtent);
      *read_info->filename='\0';
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        LockSemaphoreInfo(delegate_info->semaphore);
      (void) InvokeDelegate(read_info,image,read_info->magick,(char *) NULL,
        exception);
      if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
        UnlockSemaphoreInfo(delegate_info->semaphore);
      image=DestroyImageList(image);
      read_info->temporary=MagickTrue;
      (void) SetImageInfo(read_info,0,exception);
      magick_info=GetMagickInfo(read_info->magick,exception);
      if ((magick_info == (const MagickInfo *) NULL) ||
          (GetImageDecoder(magick_info) == (DecodeImageHandler *) NULL))
        {
          if (IsPathAccessible(read_info->filename) != MagickFalse)
            (void) ThrowMagickException(exception,GetMagickModule(),
              MissingDelegateError,"NoDecodeDelegateForThisImageFormat","`%s'",
              read_info->magick);
          else
            ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
              read_info->filename);
          read_info=DestroyImageInfo(read_info);
          return((Image *) NULL);
        }
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      image=(Image *) (GetImageDecoder(magick_info))(read_info,exception);
      if (GetMagickDecoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  if (read_info->temporary != MagickFalse)
    {
      (void) RelinquishUniqueFileResource(read_info->filename);
      read_info->temporary=MagickFalse;
      if (image != (Image *) NULL)
        (void) CopyMagickString(image->filename,filename,MagickPathExtent);
    }
  if (image == (Image *) NULL)
    {
      read_info=DestroyImageInfo(read_info);
      return(image);
    }
  if (exception->severity >= ErrorException)
    (void) LogMagickEvent(ExceptionEvent,GetMagickModule(),
      "Coder (%s) generated an image despite an error (%d), "
      "notify the developers",image->magick,exception->severity);
  if (IsBlobTemporary(image) != MagickFalse)
    (void) RelinquishUniqueFileResource(read_info->filename);
  if ((GetNextImageInList(image) != (Image *) NULL) &&
      (IsSceneGeometry(read_info->scenes,MagickFalse) != MagickFalse))
    {
      Image
        *clones;

      /*
        Honor a subimage specification (e.g. image.gif[3-7]).
      */
      clones=CloneImages(image,read_info->scenes,exception);
      if (clones == (Image *) NULL)
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "SubimageSpecificationReturnsNoImages","`%s'",read_info->filename);
      else
        {
          image=DestroyImageList(image);
          image=GetFirstImageInList(clones);
        }
    }
  /*
    Post-process every frame: normalize properties, apply orientation and
    resolution metadata, and honor read-time options.
  */
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    char
      magick_path[MagickPathExtent],
      *property,
      timestamp[MagickPathExtent];

    const char
      *option;

    const StringInfo
      *profile;

    next->taint=MagickFalse;
    GetPathComponent(magick_filename,MagickPath,magick_path);
    if (*magick_path == '\0' && *next->magick == '\0')
      (void) CopyMagickString(next->magick,magick,MagickPathExtent);
    (void) CopyMagickString(next->magick_filename,magick_filename,
      MagickPathExtent);
    if (IsBlobTemporary(image) != MagickFalse)
      (void) CopyMagickString(next->filename,filename,MagickPathExtent);
    if (next->magick_columns == 0)
      next->magick_columns=next->columns;
    if (next->magick_rows == 0)
      next->magick_rows=next->rows;
    value=GetImageProperty(next,"tiff:Orientation",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"exif:Orientation",exception);
    if (value != (char *) NULL)
      {
        next->orientation=(OrientationType) StringToLong(value);
        (void) DeleteImageProperty(next,"tiff:Orientation");
        (void) DeleteImageProperty(next,"exif:Orientation");
      }
    value=GetImageProperty(next,"exif:XResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.x;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.x=geometry_info.rho/geometry_info.sigma;
        (void) DeleteImageProperty(next,"exif:XResolution");
      }
    value=GetImageProperty(next,"exif:YResolution",exception);
    if (value != (char *) NULL)
      {
        geometry_info.rho=next->resolution.y;
        geometry_info.sigma=1.0;
        flags=ParseGeometry(value,&geometry_info);
        if (geometry_info.sigma != 0)
          next->resolution.y=geometry_info.rho/geometry_info.sigma;
        (void) DeleteImageProperty(next,"exif:YResolution");
      }
    value=GetImageProperty(next,"tiff:ResolutionUnit",exception);
    if (value == (char *) NULL)
      value=GetImageProperty(next,"exif:ResolutionUnit",exception);
    if (value != (char *) NULL)
      {
        next->units=(ResolutionType) (StringToLong(value)-1);
        (void) DeleteImageProperty(next,"exif:ResolutionUnit");
        (void) DeleteImageProperty(next,"tiff:ResolutionUnit");
      }
    if (next->page.width == 0)
      next->page.width=next->columns;
    if (next->page.height == 0)
      next->page.height=next->rows;
    option=GetImageOption(read_info,"caption");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"caption",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"comment");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"comment",property,exception);
        property=DestroyString(property);
      }
    option=GetImageOption(read_info,"label");
    if (option != (const char *) NULL)
      {
        property=InterpretImageProperties(read_info,next,option,exception);
        (void) SetImageProperty(next,"label",property,exception);
        property=DestroyString(property);
      }
    if (LocaleCompare(next->magick,"TEXT") == 0)
      (void) ParseAbsoluteGeometry("0x0+0+0",&next->page);
    if ((read_info->extract != (char *) NULL) &&
        (read_info->stream == (StreamHandler) NULL))
      {
        RectangleInfo
          geometry;

        /*
          Honor the -extract geometry: crop when an offset is given,
          otherwise resize to the requested dimensions.
        */
        flags=ParseAbsoluteGeometry(read_info->extract,&geometry);
        if ((next->columns != geometry.width) ||
            (next->rows != geometry.height))
          {
            if (((flags & XValue) != 0) || ((flags & YValue) != 0))
              {
                Image
                  *crop_image;

                crop_image=CropImage(next,&geometry,exception);
                if (crop_image != (Image *) NULL)
                  ReplaceImageInList(&next,crop_image);
              }
            else
              if (((flags & WidthValue) != 0) || ((flags & HeightValue) != 0))
                {
                  Image
                    *size_image;

                  flags=ParseRegionGeometry(next,read_info->extract,&geometry,
                    exception);
                  size_image=ResizeImage(next,geometry.width,geometry.height,
                    next->filter,exception);
                  if (size_image != (Image *) NULL)
                    ReplaceImageInList(&next,size_image);
                }
          }
      }
    /*
      NOTE(review): these profile lookups appear to be dead code -- the
      results are overwritten/unused; kept as-is pending confirmation.
    */
    profile=GetImageProfile(next,"icc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"icm");
    profile=GetImageProfile(next,"iptc");
    if (profile == (const StringInfo *) NULL)
      profile=GetImageProfile(next,"8bim");
    (void) FormatMagickTime(GetBlobProperties(next)->st_mtime,MagickPathExtent,
      timestamp);
    (void) SetImageProperty(next,"date:modify",timestamp,exception);
    (void) FormatMagickTime(GetBlobProperties(next)->st_ctime,MagickPathExtent,
      timestamp);
    (void) SetImageProperty(next,"date:create",timestamp,exception);
    option=GetImageOption(image_info,"delay");
    if (option != (const char *) NULL)
      {
        /*
          -delay N>: cap the delay at N; -delay N<: raise the delay to at
          least N; plain -delay N: set it unconditionally.
        */
        flags=ParseGeometry(option,&geometry_info);
        if ((flags & GreaterValue) != 0)
          {
            if (next->delay > (size_t) floor(geometry_info.rho+0.5))
              next->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          if ((flags & LessValue) != 0)
            {
              /*
                Fixed: this branch previously set ticks_per_second from
                sigma instead of raising the delay to rho.
              */
              if (next->delay < (size_t) floor(geometry_info.rho+0.5))
                next->delay=(size_t) floor(geometry_info.rho+0.5);
            }
          else
            next->delay=(size_t) floor(geometry_info.rho+0.5);
        if ((flags & SigmaValue) != 0)
          next->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
      }
    option=GetImageOption(image_info,"dispose");
    if (option != (const char *) NULL)
      next->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
        MagickFalse,option);
    if (read_info->verbose != MagickFalse)
      (void) IdentifyImage(next,stderr,MagickFalse,exception);
    image=next;
  }
  read_info=DestroyImageInfo(read_info);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadImages() reads one or more images and returns them as an image list.
%
% The format of the ReadImage method is:
%
% Image *ReadImages(ImageInfo *image_info,const char *filename,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadImages(ImageInfo *image_info,const char *filename,
  ExceptionInfo *exception)
{
  char
    expanded_filename[MagickPathExtent];

  Image
    *image;

  ImageInfo
    *clone_info;

  /*
    Read an image or an image sequence given its filename.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  clone_info=CloneImageInfo(image_info);
  *clone_info->magick='\0';
  (void) SetImageOption(clone_info,"filename",filename);
  (void) CopyMagickString(clone_info->filename,filename,MagickPathExtent);
  (void) InterpretImageFilename(clone_info,(Image *) NULL,filename,
    (int) clone_info->scene,expanded_filename,exception);
  if (LocaleCompare(expanded_filename,clone_info->filename) != 0)
    {
      ExceptionInfo
        *sans;

      Image
        *scene_list;

      ssize_t
        last_scene,
        scene;

      /*
        The filename embeds a scene template (e.g. image-%d.png[1-5]): read
        each scene in the requested range into a single list.
      */
      sans=AcquireExceptionInfo();
      (void) SetImageInfo(clone_info,0,sans);
      sans=DestroyExceptionInfo(sans);
      if (clone_info->number_scenes == 0)
        {
          clone_info=DestroyImageInfo(clone_info);
          return(ReadImage(image_info,exception));
        }
      (void) CopyMagickString(expanded_filename,clone_info->filename,
        MagickPathExtent);
      scene_list=NewImageList();
      last_scene=(ssize_t) (clone_info->scene+clone_info->number_scenes);
      for (scene=(ssize_t) clone_info->scene; scene < last_scene; scene++)
      {
        (void) InterpretImageFilename(image_info,(Image *) NULL,
          expanded_filename,(int) scene,clone_info->filename,exception);
        image=ReadImage(clone_info,exception);
        if (image != (Image *) NULL)
          AppendImageToList(&scene_list,image);
      }
      clone_info=DestroyImageInfo(clone_info);
      return(scene_list);
    }
  image=ReadImage(clone_info,exception);
  clone_info=DestroyImageInfo(clone_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d I n l i n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadInlineImage() reads a Base64-encoded inline image or image sequence.
% The method returns a NULL if there is a memory shortage or if the image
% cannot be read. On failure, a NULL image is returned and exception
% describes the reason for the failure.
%
% The format of the ReadInlineImage method is:
%
% Image *ReadInlineImage(const ImageInfo *image_info,const char *content,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o content: the image encoded in Base64.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ReadInlineImage(const ImageInfo *image_info,
  const char *content,ExceptionInfo *exception)
{
  Image
    *image;

  ImageInfo
    *read_info;

  unsigned char
    *blob;

  size_t
    length;

  register const char
    *p;

  /*
    Skip over header (e.g. data:image/gif;base64,).
  */
  image=NewImageList();
  for (p=content; (*p != ',') && (*p != '\0'); p++) ;
  if (*p == '\0')
    ThrowReaderException(CorruptImageError,"CorruptImage");
  p++;
  length=0;
  blob=Base64Decode(p,&length);
  if (length == 0)
    {
      /*
        Release any buffer Base64Decode may have returned before throwing;
        the previous code leaked it on this path (RelinquishMagickMemory is
        a safe no-op for NULL).
      */
      blob=(unsigned char *) RelinquishMagickMemory(blob);
      ThrowReaderException(CorruptImageError,"CorruptImage");
    }
  /*
    Decode the payload with a quiet clone: no progress monitor, and let
    BlobToImage sniff the format from the blob contents.
  */
  read_info=CloneImageInfo(image_info);
  (void) SetImageInfoProgressMonitor(read_info,(MagickProgressMonitor) NULL,
    (void *) NULL);
  *read_info->filename='\0';
  *read_info->magick='\0';
  image=BlobToImage(read_info,blob,length,exception);
  blob=(unsigned char *) RelinquishMagickMemory(blob);
  read_info=DestroyImageInfo(read_info);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImage() writes an image or an image sequence to a file or file handle.
% If writing to a file is on disk, the name is defined by the filename member
% of the image structure. WriteImage() returns MagickFalse is there is a
% memory shortage or if the image cannot be written. Check the exception
% member of image to determine the cause for any failure.
%
% The format of the WriteImage method is:
%
% MagickBooleanType WriteImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  const char
    *option;

  const DelegateInfo
    *delegate_info;

  const MagickInfo
    *magick_info;

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    status,
    temporary;

  PolicyDomain
    domain;

  PolicyRights
    rights;

  /*
    Determine image type from filename prefix or suffix (e.g. image.jpg).
    Assert image is non-NULL *before* it is dereferenced; the previous code
    read image->debug ahead of the NULL assertion.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  sans_exception=AcquireExceptionInfo();
  write_info=CloneImageInfo(image_info);
  (void) CopyMagickString(write_info->filename,image->filename,
    MagickPathExtent);
  (void) SetImageInfo(write_info,1,sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,image->magick,MagickPathExtent);
  (void) CopyMagickString(filename,image->filename,MagickPathExtent);
  (void) CopyMagickString(image->filename,write_info->filename,
    MagickPathExtent);
  /*
    Verify the security policy authorizes writing with this coder.
  */
  domain=CoderPolicyDomain;
  rights=WritePolicyRights;
  if (IsRightsAuthorized(domain,rights,write_info->magick) == MagickFalse)
    {
      sans_exception=DestroyExceptionInfo(sans_exception);
      write_info=DestroyImageInfo(write_info);
      errno=EPERM;
      ThrowBinaryException(PolicyError,"NotAuthorized",filename);
    }
  /*
    Call appropriate image reader based on image type.
  */
  magick_info=GetMagickInfo(write_info->magick,sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (magick_info != (const MagickInfo *) NULL)
    {
      if (GetMagickEndianSupport(magick_info) == MagickFalse)
        image->endian=UndefinedEndian;
      else
        if ((image_info->endian == UndefinedEndian) &&
            (GetMagickRawSupport(magick_info) != MagickFalse))
          {
            unsigned long
              lsb_first;

            /* detect host byte order for raw coders with no fixed endian */
            lsb_first=1;
            image->endian=(*(char *) &lsb_first) == 1 ? LSBEndian : MSBEndian;
          }
    }
  (void) SyncImageProfiles(image);
  DisassociateImageStream(image);
  option=GetImageOption(image_info,"delegate:bimodal");
  if ((IfMagickTrue(IsStringTrue(option))) &&
      (write_info->page == (char *) NULL) &&
      (GetPreviousImageInList(image) == (Image *) NULL) &&
      (GetNextImageInList(image) == (Image *) NULL) &&
      (IfMagickFalse(IsTaintImage(image))) )
    {
      delegate_info=GetDelegateInfo(image->magick,write_info->magick,exception);
      if ((delegate_info != (const DelegateInfo *) NULL) &&
          (GetDelegateMode(delegate_info) == 0) &&
          (IsPathAccessible(image->magick_filename) != MagickFalse))
        {
          /*
            Process image with bi-modal delegate.
          */
          (void) CopyMagickString(image->filename,image->magick_filename,
            MagickPathExtent);
          status=InvokeDelegate(write_info,image,image->magick,
            write_info->magick,exception);
          write_info=DestroyImageInfo(write_info);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
          return(status);
        }
    }
  status=MagickFalse;
  temporary=MagickFalse;
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetMagickSeekableStream(magick_info) != MagickFalse))
    {
      char
        image_filename[MagickPathExtent];

      (void) CopyMagickString(image_filename,image->filename,MagickPathExtent);
      status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
      (void) CopyMagickString(image->filename, image_filename,MagickPathExtent);
      if (status != MagickFalse)
        {
          if (IsBlobSeekable(image) == MagickFalse)
            {
              /*
                A seekable stream is required by the encoder; write to a
                temporary file and copy to the stream afterwards.
              */
              write_info->adjoin=MagickTrue;
              (void) CopyMagickString(write_info->filename,image->filename,
                MagickPathExtent);
              (void) AcquireUniqueFilename(image->filename);
              temporary=MagickTrue;
            }
          (void) CloseBlob(image);
        }
    }
  if ((magick_info != (const MagickInfo *) NULL) &&
      (GetImageEncoder(magick_info) != (EncodeImageHandler *) NULL))
    {
      /*
        Call appropriate image writer based on image type.
      */
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        LockSemaphoreInfo(magick_info->semaphore);
      status=GetImageEncoder(magick_info)(write_info,image,exception);
      if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
        UnlockSemaphoreInfo(magick_info->semaphore);
    }
  else
    {
      delegate_info=GetDelegateInfo((char *) NULL,write_info->magick,exception);
      if (delegate_info != (DelegateInfo *) NULL)
        {
          /*
            Process the image with delegate.
          */
          *write_info->filename='\0';
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            LockSemaphoreInfo(delegate_info->semaphore);
          status=InvokeDelegate(write_info,image,(char *) NULL,
            write_info->magick,exception);
          if (GetDelegateThreadSupport(delegate_info) == MagickFalse)
            UnlockSemaphoreInfo(delegate_info->semaphore);
          (void) CopyMagickString(image->filename,filename,MagickPathExtent);
        }
      else
        {
          /*
            No encoder or delegate matched: fall back to the image's own
            format, then the filename extension, before giving up.
          */
          sans_exception=AcquireExceptionInfo();
          magick_info=GetMagickInfo(write_info->magick,sans_exception);
          sans_exception=DestroyExceptionInfo(sans_exception);
          if ((write_info->affirm == MagickFalse) &&
              (magick_info == (const MagickInfo *) NULL))
            {
              (void) CopyMagickString(write_info->magick,image->magick,
                MagickPathExtent);
              magick_info=GetMagickInfo(write_info->magick,exception);
            }
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetImageEncoder(magick_info) == (EncodeImageHandler *) NULL))
            {
              char
                extension[MagickPathExtent];

              GetPathComponent(image->filename,ExtensionPath,extension);
              if (*extension != '\0')
                magick_info=GetMagickInfo(extension,exception);
              else
                magick_info=GetMagickInfo(image->magick,exception);
              (void) CopyMagickString(image->filename,filename,
                MagickPathExtent);
            }
          if ((magick_info == (const MagickInfo *) NULL) ||
              (GetImageEncoder(magick_info) == (EncodeImageHandler *) NULL))
            {
              magick_info=GetMagickInfo(image->magick,exception);
              if ((magick_info == (const MagickInfo *) NULL) ||
                  (GetImageEncoder(magick_info) == (EncodeImageHandler *) NULL))
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateError,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
              else
                (void) ThrowMagickException(exception,GetMagickModule(),
                  MissingDelegateWarning,"NoEncodeDelegateForThisImageFormat",
                  "`%s'",write_info->magick);
            }
          if ((magick_info != (const MagickInfo *) NULL) &&
              (GetImageEncoder(magick_info) != (EncodeImageHandler *) NULL))
            {
              /*
                Call appropriate image writer based on image type.
              */
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                LockSemaphoreInfo(magick_info->semaphore);
              status=GetImageEncoder(magick_info)(write_info,image,exception);
              if (GetMagickEncoderThreadSupport(magick_info) == MagickFalse)
                UnlockSemaphoreInfo(magick_info->semaphore);
            }
        }
    }
  if (temporary != MagickFalse)
    {
      /*
        Copy temporary image file to permanent.
      */
      status=OpenBlob(write_info,image,ReadBinaryBlobMode,exception);
      if (status != MagickFalse)
        {
          (void) RelinquishUniqueFileResource(write_info->filename);
          status=ImageToFile(image,write_info->filename,exception);
        }
      (void) CloseBlob(image);
      (void) RelinquishUniqueFileResource(image->filename);
      (void) CopyMagickString(image->filename,write_info->filename,
        MagickPathExtent);
    }
  if ((LocaleCompare(write_info->magick,"info") != 0) &&
      (write_info->verbose != MagickFalse))
    (void) IdentifyImage(image,stdout,MagickFalse,exception);
  write_info=DestroyImageInfo(write_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteImages() writes an image sequence into one or more files. While
% WriteImage() can write an image sequence, it is limited to writing
% the sequence into a single file using a format which supports multiple
% frames. WriteImages(), however, does not have this limitation, instead it
% generates multiple output files if necessary (or when requested). When
% ImageInfo's adjoin flag is set to MagickFalse, the file name is expected
% to include a printf-style formatting string for the frame number (e.g.
% "image%02d.png").
%
% The format of the WriteImages method is:
%
% MagickBooleanType WriteImages(const ImageInfo *image_info,Image *images,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o images: the image list.
%
% o filename: the image filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WriteImages(const ImageInfo *image_info,
  Image *images,const char *filename,ExceptionInfo *exception)
{
#define WriteImageTag "Write/Image"

  ExceptionInfo
    *sans_exception;

  ImageInfo
    *write_info;

  MagickBooleanType
    proceed;

  MagickOffsetType
    progress;

  MagickProgressMonitor
    progress_monitor;

  MagickSizeType
    number_images;

  MagickStatusType
    status;

  register Image
    *p;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    Work on a private clone so the caller's image_info is not modified.
  */
  write_info=CloneImageInfo(image_info);
  *write_info->magick='\0';
  images=GetFirstImageInList(images);
  /*
    An explicit filename argument overrides the filename stored in each frame.
  */
  if (filename != (const char *) NULL)
    for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
      (void) CopyMagickString(p->filename,filename,MagickPathExtent);
  (void) CopyMagickString(write_info->filename,images->filename,
    MagickPathExtent);
  /*
    Determine the output format; errors here are non-fatal (sans_exception).
  */
  sans_exception=AcquireExceptionInfo();
  (void) SetImageInfo(write_info,(unsigned int) GetImageListLength(images),
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  if (*write_info->magick == '\0')
    (void) CopyMagickString(write_info->magick,images->magick,MagickPathExtent);
  /*
    If the scene numbers are not strictly increasing, renumber the whole
    sequence consecutively starting from the first frame's scene.
  */
  p=images;
  for ( ; GetNextImageInList(p) != (Image *) NULL; p=GetNextImageInList(p))
  {
    register Image
      *next;

    /* next is never NULL here: the loop condition above guarantees it. */
    next=GetNextImageInList(p);
    if (p->scene >= next->scene)
      {
        register ssize_t
          i;

        /*
          Generate consistent scene numbers.
        */
        i=(ssize_t) images->scene;
        for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
          p->scene=(size_t) i++;
        break;
      }
  }
  /*
    Write images.  When 'adjoin' is set, a single WriteImage() call emits the
    whole sequence into one file, so we stop after the first iteration.
  */
  status=MagickTrue;
  progress_monitor=(MagickProgressMonitor) NULL;
  progress=0;
  number_images=GetImageListLength(images);
  for (p=images; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    /*
      Suppress the per-image monitor so overall progress can be reported
      per frame instead; restore it after writing.
    */
    if (number_images != 1)
      progress_monitor=SetImageProgressMonitor(p,(MagickProgressMonitor) NULL,
        p->client_data);
    status&=WriteImage(write_info,p,exception);
    if (number_images != 1)
      (void) SetImageProgressMonitor(p,progress_monitor,p->client_data);
    if (write_info->adjoin != MagickFalse)
      break;
    if (number_images != 1)
      {
        proceed=SetImageProgress(p,WriteImageTag,progress++,number_images);
        if (proceed == MagickFalse)
          break;
      }
  }
  write_info=DestroyImageInfo(write_info);
  return(status != 0 ? MagickTrue : MagickFalse);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_1836_0 |
crossvul-cpp_data_good_528_4 | /* vi:set ts=8 sts=4 sw=4 noet:
*
* VIM - Vi IMproved by Bram Moolenaar
*
* Do ":help uganda" in Vim to read copying and usage conditions.
* Do ":help credits" in Vim to see a list of people who contributed.
* See README.txt for an overview of the Vim source code.
*/
#include "vim.h"
#ifdef AMIGA
# include <time.h> /* for time() */
#endif
/*
* Vim originated from Stevie version 3.6 (Fish disk 217) by GRWalter (Fred)
* It has been changed beyond recognition since then.
*
* Differences between version 7.4 and 8.x can be found with ":help version8".
* Differences between version 6.4 and 7.x can be found with ":help version7".
* Differences between version 5.8 and 6.x can be found with ":help version6".
* Differences between version 4.x and 5.x can be found with ":help version5".
* Differences between version 3.0 and 4.x can be found with ":help version4".
* All the remarks about older versions have been removed, they are not very
* interesting.
*/
#include "version.h"
char *Version = VIM_VERSION_SHORT;
static char *mediumVersion = VIM_VERSION_MEDIUM;
#if defined(HAVE_DATE_TIME) || defined(PROTO)
# if (defined(VMS) && defined(VAXC)) || defined(PROTO)
/* Static buffer sized for the fixed text plus __DATE__ and __TIME__. */
char	longVersion[sizeof(VIM_VERSION_LONG_DATE) + sizeof(__DATE__)
						      + sizeof(__TIME__) + 3];

    void
init_longVersion(void)
{
    /*
     * Construct the long version string. Necessary because
     * VAX C can't catenate strings in the preprocessor.
     */
    strcpy(longVersion, VIM_VERSION_LONG_DATE);
    strcat(longVersion, __DATE__);
    strcat(longVersion, " ");
    strcat(longVersion, __TIME__);
    strcat(longVersion, ")");
}

# else
/*
 * Build "longVersion" at run time so the (translated) message text can be
 * combined with the compile date/time.
 */
    void
init_longVersion(void)
{
    char	*date_time = __DATE__ " " __TIME__;
    char	*msg = _("%s (%s, compiled %s)");
    /* The three "%s" in msg (6 chars) are replaced, so this length also
     * covers the terminating NUL. */
    size_t	len = strlen(msg)
		    + strlen(VIM_VERSION_LONG_ONLY)
		    + strlen(VIM_VERSION_DATE_ONLY)
		    + strlen(date_time);

    longVersion = (char *)alloc((unsigned)len);
    if (longVersion == NULL)
	/* Out of memory: fall back to the untranslated constant string. */
	longVersion = VIM_VERSION_LONG;
    else
	vim_snprintf(longVersion, len, msg,
		      VIM_VERSION_LONG_ONLY, VIM_VERSION_DATE_ONLY, date_time);
}
# endif
#else
/* No compile date/time available: use the constant string. */
char	*longVersion = VIM_VERSION_LONG;

    void
init_longVersion(void)
{
    // nothing to do
}
#endif
static char *(features[]) =
{
#ifdef HAVE_ACL
"+acl",
#else
"-acl",
#endif
#ifdef AMIGA /* only for Amiga systems */
# ifdef FEAT_ARP
"+ARP",
# else
"-ARP",
# endif
#endif
#ifdef FEAT_ARABIC
"+arabic",
#else
"-arabic",
#endif
"+autocmd",
#ifdef FEAT_AUTOCHDIR
"+autochdir",
#else
"-autochdir",
#endif
#ifdef FEAT_AUTOSERVERNAME
"+autoservername",
#else
"-autoservername",
#endif
#ifdef FEAT_BEVAL_GUI
"+balloon_eval",
#else
"-balloon_eval",
#endif
#ifdef FEAT_BEVAL_TERM
"+balloon_eval_term",
#else
"-balloon_eval_term",
#endif
#ifdef FEAT_BROWSE
"+browse",
#else
"-browse",
#endif
#ifdef NO_BUILTIN_TCAPS
"-builtin_terms",
#endif
#ifdef SOME_BUILTIN_TCAPS
"+builtin_terms",
#endif
#ifdef ALL_BUILTIN_TCAPS
"++builtin_terms",
#endif
#ifdef FEAT_BYTEOFF
"+byte_offset",
#else
"-byte_offset",
#endif
#ifdef FEAT_JOB_CHANNEL
"+channel",
#else
"-channel",
#endif
#ifdef FEAT_CINDENT
"+cindent",
#else
"-cindent",
#endif
#ifdef FEAT_CLIENTSERVER
"+clientserver",
#else
"-clientserver",
#endif
#ifdef FEAT_CLIPBOARD
"+clipboard",
#else
"-clipboard",
#endif
#ifdef FEAT_CMDL_COMPL
"+cmdline_compl",
#else
"-cmdline_compl",
#endif
#ifdef FEAT_CMDHIST
"+cmdline_hist",
#else
"-cmdline_hist",
#endif
#ifdef FEAT_CMDL_INFO
"+cmdline_info",
#else
"-cmdline_info",
#endif
#ifdef FEAT_COMMENTS
"+comments",
#else
"-comments",
#endif
#ifdef FEAT_CONCEAL
"+conceal",
#else
"-conceal",
#endif
#ifdef FEAT_CRYPT
"+cryptv",
#else
"-cryptv",
#endif
#ifdef FEAT_CSCOPE
"+cscope",
#else
"-cscope",
#endif
"+cursorbind",
#ifdef CURSOR_SHAPE
"+cursorshape",
#else
"-cursorshape",
#endif
#if defined(FEAT_CON_DIALOG) && defined(FEAT_GUI_DIALOG)
"+dialog_con_gui",
#else
# if defined(FEAT_CON_DIALOG)
"+dialog_con",
# else
# if defined(FEAT_GUI_DIALOG)
"+dialog_gui",
# else
"-dialog",
# endif
# endif
#endif
#ifdef FEAT_DIFF
"+diff",
#else
"-diff",
#endif
#ifdef FEAT_DIGRAPHS
"+digraphs",
#else
"-digraphs",
#endif
#ifdef FEAT_GUI_W32
# ifdef FEAT_DIRECTX
"+directx",
# else
"-directx",
# endif
#endif
#ifdef FEAT_DND
"+dnd",
#else
"-dnd",
#endif
#ifdef EBCDIC
"+ebcdic",
#else
"-ebcdic",
#endif
#ifdef FEAT_EMACS_TAGS
"+emacs_tags",
#else
"-emacs_tags",
#endif
#ifdef FEAT_EVAL
"+eval",
#else
"-eval",
#endif
"+ex_extra",
#ifdef FEAT_SEARCH_EXTRA
"+extra_search",
#else
"-extra_search",
#endif
#ifdef FEAT_FKMAP
"+farsi",
#else
"-farsi",
#endif
#ifdef FEAT_SEARCHPATH
"+file_in_path",
#else
"-file_in_path",
#endif
#ifdef FEAT_FIND_ID
"+find_in_path",
#else
"-find_in_path",
#endif
#ifdef FEAT_FLOAT
"+float",
#else
"-float",
#endif
#ifdef FEAT_FOLDING
"+folding",
#else
"-folding",
#endif
#ifdef FEAT_FOOTER
"+footer",
#else
"-footer",
#endif
/* only interesting on Unix systems */
#if !defined(USE_SYSTEM) && defined(UNIX)
"+fork()",
#endif
#ifdef FEAT_GETTEXT
# ifdef DYNAMIC_GETTEXT
"+gettext/dyn",
# else
"+gettext",
# endif
#else
"-gettext",
#endif
#ifdef FEAT_HANGULIN
"+hangul_input",
#else
"-hangul_input",
#endif
#if (defined(HAVE_ICONV_H) && defined(USE_ICONV)) || defined(DYNAMIC_ICONV)
# ifdef DYNAMIC_ICONV
"+iconv/dyn",
# else
"+iconv",
# endif
#else
"-iconv",
#endif
#ifdef FEAT_INS_EXPAND
"+insert_expand",
#else
"-insert_expand",
#endif
#ifdef FEAT_JOB_CHANNEL
"+job",
#else
"-job",
#endif
#ifdef FEAT_JUMPLIST
"+jumplist",
#else
"-jumplist",
#endif
#ifdef FEAT_KEYMAP
"+keymap",
#else
"-keymap",
#endif
#ifdef FEAT_EVAL
"+lambda",
#else
"-lambda",
#endif
#ifdef FEAT_LANGMAP
"+langmap",
#else
"-langmap",
#endif
#ifdef FEAT_LIBCALL
"+libcall",
#else
"-libcall",
#endif
#ifdef FEAT_LINEBREAK
"+linebreak",
#else
"-linebreak",
#endif
#ifdef FEAT_LISP
"+lispindent",
#else
"-lispindent",
#endif
"+listcmds",
#ifdef FEAT_LOCALMAP
"+localmap",
#else
"-localmap",
#endif
#ifdef FEAT_LUA
# ifdef DYNAMIC_LUA
"+lua/dyn",
# else
"+lua",
# endif
#else
"-lua",
#endif
#ifdef FEAT_MENU
"+menu",
#else
"-menu",
#endif
#ifdef FEAT_SESSION
"+mksession",
#else
"-mksession",
#endif
#ifdef FEAT_MODIFY_FNAME
"+modify_fname",
#else
"-modify_fname",
#endif
#ifdef FEAT_MOUSE
"+mouse",
# ifdef FEAT_MOUSESHAPE
"+mouseshape",
# else
"-mouseshape",
# endif
# else
"-mouse",
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_MOUSE_DEC
"+mouse_dec",
# else
"-mouse_dec",
# endif
# ifdef FEAT_MOUSE_GPM
"+mouse_gpm",
# else
"-mouse_gpm",
# endif
# ifdef FEAT_MOUSE_JSB
"+mouse_jsbterm",
# else
"-mouse_jsbterm",
# endif
# ifdef FEAT_MOUSE_NET
"+mouse_netterm",
# else
"-mouse_netterm",
# endif
#endif
#ifdef __QNX__
# ifdef FEAT_MOUSE_PTERM
"+mouse_pterm",
# else
"-mouse_pterm",
# endif
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_MOUSE_SGR
"+mouse_sgr",
# else
"-mouse_sgr",
# endif
# ifdef FEAT_SYSMOUSE
"+mouse_sysmouse",
# else
"-mouse_sysmouse",
# endif
# ifdef FEAT_MOUSE_URXVT
"+mouse_urxvt",
# else
"-mouse_urxvt",
# endif
# ifdef FEAT_MOUSE_XTERM
"+mouse_xterm",
# else
"-mouse_xterm",
# endif
#endif
#ifdef FEAT_MBYTE_IME
# ifdef DYNAMIC_IME
"+multi_byte_ime/dyn",
# else
"+multi_byte_ime",
# endif
#else
# ifdef FEAT_MBYTE
"+multi_byte",
# else
"-multi_byte",
# endif
#endif
#ifdef FEAT_MULTI_LANG
"+multi_lang",
#else
"-multi_lang",
#endif
#ifdef FEAT_MZSCHEME
# ifdef DYNAMIC_MZSCHEME
"+mzscheme/dyn",
# else
"+mzscheme",
# endif
#else
"-mzscheme",
#endif
#ifdef FEAT_NETBEANS_INTG
"+netbeans_intg",
#else
"-netbeans_intg",
#endif
#ifdef FEAT_NUM64
"+num64",
#else
"-num64",
#endif
#ifdef FEAT_GUI_W32
# ifdef FEAT_OLE
"+ole",
# else
"-ole",
# endif
#endif
#ifdef FEAT_EVAL
"+packages",
#else
"-packages",
#endif
#ifdef FEAT_PATH_EXTRA
"+path_extra",
#else
"-path_extra",
#endif
#ifdef FEAT_PERL
# ifdef DYNAMIC_PERL
"+perl/dyn",
# else
"+perl",
# endif
#else
"-perl",
#endif
#ifdef FEAT_PERSISTENT_UNDO
"+persistent_undo",
#else
"-persistent_undo",
#endif
#ifdef FEAT_PRINTER
# ifdef FEAT_POSTSCRIPT
"+postscript",
# else
"-postscript",
# endif
"+printer",
#else
"-printer",
#endif
#ifdef FEAT_PROFILE
"+profile",
#else
"-profile",
#endif
#ifdef FEAT_PYTHON
# ifdef DYNAMIC_PYTHON
"+python/dyn",
# else
"+python",
# endif
#else
"-python",
#endif
#ifdef FEAT_PYTHON3
# ifdef DYNAMIC_PYTHON3
"+python3/dyn",
# else
"+python3",
# endif
#else
"-python3",
#endif
#ifdef FEAT_QUICKFIX
"+quickfix",
#else
"-quickfix",
#endif
#ifdef FEAT_RELTIME
"+reltime",
#else
"-reltime",
#endif
#ifdef FEAT_RIGHTLEFT
"+rightleft",
#else
"-rightleft",
#endif
#ifdef FEAT_RUBY
# ifdef DYNAMIC_RUBY
"+ruby/dyn",
# else
"+ruby",
# endif
#else
"-ruby",
#endif
"+scrollbind",
#ifdef FEAT_SIGNS
"+signs",
#else
"-signs",
#endif
#ifdef FEAT_SMARTINDENT
"+smartindent",
#else
"-smartindent",
#endif
#ifdef STARTUPTIME
"+startuptime",
#else
"-startuptime",
#endif
#ifdef FEAT_STL_OPT
"+statusline",
#else
"-statusline",
#endif
#ifdef FEAT_SUN_WORKSHOP
"+sun_workshop",
#else
"-sun_workshop",
#endif
#ifdef FEAT_SYN_HL
"+syntax",
#else
"-syntax",
#endif
/* only interesting on Unix systems */
#if defined(USE_SYSTEM) && defined(UNIX)
"+system()",
#endif
#ifdef FEAT_TAG_BINS
"+tag_binary",
#else
"-tag_binary",
#endif
#ifdef FEAT_TAG_OLDSTATIC
"+tag_old_static",
#else
"-tag_old_static",
#endif
#ifdef FEAT_TAG_ANYWHITE
"+tag_any_white",
#else
"-tag_any_white",
#endif
#ifdef FEAT_TCL
# ifdef DYNAMIC_TCL
"+tcl/dyn",
# else
"+tcl",
# endif
#else
"-tcl",
#endif
#ifdef FEAT_TERMGUICOLORS
"+termguicolors",
#else
"-termguicolors",
#endif
#ifdef FEAT_TERMINAL
"+terminal",
#else
"-terminal",
#endif
#if defined(UNIX)
/* only Unix can have terminfo instead of termcap */
# ifdef TERMINFO
"+terminfo",
# else
"-terminfo",
# endif
#endif
#ifdef FEAT_TERMRESPONSE
"+termresponse",
#else
"-termresponse",
#endif
#ifdef FEAT_TEXTOBJ
"+textobjects",
#else
"-textobjects",
#endif
#ifdef FEAT_TEXT_PROP
"+textprop",
#else
"-textprop",
#endif
#if !defined(UNIX)
/* unix always includes termcap support */
# ifdef HAVE_TGETENT
"+tgetent",
# else
"-tgetent",
# endif
#endif
#ifdef FEAT_TIMERS
"+timers",
#else
"-timers",
#endif
#ifdef FEAT_TITLE
"+title",
#else
"-title",
#endif
#ifdef FEAT_TOOLBAR
"+toolbar",
#else
"-toolbar",
#endif
#ifdef FEAT_USR_CMDS
"+user_commands",
#else
"-user_commands",
#endif
#ifdef FEAT_VARTABS
"+vartabs",
#else
"-vartabs",
#endif
"+vertsplit",
#ifdef FEAT_VIRTUALEDIT
"+virtualedit",
#else
"-virtualedit",
#endif
"+visual",
#ifdef FEAT_VISUALEXTRA
"+visualextra",
#else
"-visualextra",
#endif
#ifdef FEAT_VIMINFO
"+viminfo",
#else
"-viminfo",
#endif
"+vreplace",
#ifdef WIN3264
# ifdef FEAT_VTP
"+vtp",
# else
"-vtp",
# endif
#endif
#ifdef FEAT_WILDIGN
"+wildignore",
#else
"-wildignore",
#endif
#ifdef FEAT_WILDMENU
"+wildmenu",
#else
"-wildmenu",
#endif
"+windows",
#ifdef FEAT_WRITEBACKUP
"+writebackup",
#else
"-writebackup",
#endif
#if defined(UNIX) || defined(VMS)
# ifdef FEAT_X11
"+X11",
# else
"-X11",
# endif
#endif
#ifdef FEAT_XFONTSET
"+xfontset",
#else
"-xfontset",
#endif
#ifdef FEAT_XIM
"+xim",
#else
"-xim",
#endif
#ifdef WIN3264
# ifdef FEAT_XPM_W32
"+xpm_w32",
# else
"-xpm_w32",
# endif
#else
# ifdef HAVE_XPM
"+xpm",
# else
"-xpm",
# endif
#endif
#if defined(UNIX) || defined(VMS)
# ifdef USE_XSMP_INTERACT
"+xsmp_interact",
# else
# ifdef USE_XSMP
"+xsmp",
# else
"-xsmp",
# endif
# endif
# ifdef FEAT_XCLIPBOARD
"+xterm_clipboard",
# else
"-xterm_clipboard",
# endif
#endif
#ifdef FEAT_XTERM_SAVE
"+xterm_save",
#else
"-xterm_save",
#endif
NULL
};
static int included_patches[] =
{ /* Add new patch number below this line */
/**/
633,
/**/
632,
/**/
631,
/**/
630,
/**/
629,
/**/
628,
/**/
627,
/**/
626,
/**/
625,
/**/
624,
/**/
623,
/**/
622,
/**/
621,
/**/
620,
/**/
619,
/**/
618,
/**/
617,
/**/
616,
/**/
615,
/**/
614,
/**/
613,
/**/
612,
/**/
611,
/**/
610,
/**/
609,
/**/
608,
/**/
607,
/**/
606,
/**/
605,
/**/
604,
/**/
603,
/**/
602,
/**/
601,
/**/
600,
/**/
599,
/**/
598,
/**/
597,
/**/
596,
/**/
595,
/**/
594,
/**/
593,
/**/
592,
/**/
591,
/**/
590,
/**/
589,
/**/
588,
/**/
587,
/**/
586,
/**/
585,
/**/
584,
/**/
583,
/**/
582,
/**/
581,
/**/
580,
/**/
579,
/**/
578,
/**/
577,
/**/
576,
/**/
575,
/**/
574,
/**/
573,
/**/
572,
/**/
571,
/**/
570,
/**/
569,
/**/
568,
/**/
567,
/**/
566,
/**/
565,
/**/
564,
/**/
563,
/**/
562,
/**/
561,
/**/
560,
/**/
559,
/**/
558,
/**/
557,
/**/
556,
/**/
555,
/**/
554,
/**/
553,
/**/
552,
/**/
551,
/**/
550,
/**/
549,
/**/
548,
/**/
547,
/**/
546,
/**/
545,
/**/
544,
/**/
543,
/**/
542,
/**/
541,
/**/
540,
/**/
539,
/**/
538,
/**/
537,
/**/
536,
/**/
535,
/**/
534,
/**/
533,
/**/
532,
/**/
531,
/**/
530,
/**/
529,
/**/
528,
/**/
527,
/**/
526,
/**/
525,
/**/
524,
/**/
523,
/**/
522,
/**/
521,
/**/
520,
/**/
519,
/**/
518,
/**/
517,
/**/
516,
/**/
515,
/**/
514,
/**/
513,
/**/
512,
/**/
511,
/**/
510,
/**/
509,
/**/
508,
/**/
507,
/**/
506,
/**/
505,
/**/
504,
/**/
503,
/**/
502,
/**/
501,
/**/
500,
/**/
499,
/**/
498,
/**/
497,
/**/
496,
/**/
495,
/**/
494,
/**/
493,
/**/
492,
/**/
491,
/**/
490,
/**/
489,
/**/
488,
/**/
487,
/**/
486,
/**/
485,
/**/
484,
/**/
483,
/**/
482,
/**/
481,
/**/
480,
/**/
479,
/**/
478,
/**/
477,
/**/
476,
/**/
475,
/**/
474,
/**/
473,
/**/
472,
/**/
471,
/**/
470,
/**/
469,
/**/
468,
/**/
467,
/**/
466,
/**/
465,
/**/
464,
/**/
463,
/**/
462,
/**/
461,
/**/
460,
/**/
459,
/**/
458,
/**/
457,
/**/
456,
/**/
455,
/**/
454,
/**/
453,
/**/
452,
/**/
451,
/**/
450,
/**/
449,
/**/
448,
/**/
447,
/**/
446,
/**/
445,
/**/
444,
/**/
443,
/**/
442,
/**/
441,
/**/
440,
/**/
439,
/**/
438,
/**/
437,
/**/
436,
/**/
435,
/**/
434,
/**/
433,
/**/
432,
/**/
431,
/**/
430,
/**/
429,
/**/
428,
/**/
427,
/**/
426,
/**/
425,
/**/
424,
/**/
423,
/**/
422,
/**/
421,
/**/
420,
/**/
419,
/**/
418,
/**/
417,
/**/
416,
/**/
415,
/**/
414,
/**/
413,
/**/
412,
/**/
411,
/**/
410,
/**/
409,
/**/
408,
/**/
407,
/**/
406,
/**/
405,
/**/
404,
/**/
403,
/**/
402,
/**/
401,
/**/
400,
/**/
399,
/**/
398,
/**/
397,
/**/
396,
/**/
395,
/**/
394,
/**/
393,
/**/
392,
/**/
391,
/**/
390,
/**/
389,
/**/
388,
/**/
387,
/**/
386,
/**/
385,
/**/
384,
/**/
383,
/**/
382,
/**/
381,
/**/
380,
/**/
379,
/**/
378,
/**/
377,
/**/
376,
/**/
375,
/**/
374,
/**/
373,
/**/
372,
/**/
371,
/**/
370,
/**/
369,
/**/
368,
/**/
367,
/**/
366,
/**/
365,
/**/
364,
/**/
363,
/**/
362,
/**/
361,
/**/
360,
/**/
359,
/**/
358,
/**/
357,
/**/
356,
/**/
355,
/**/
354,
/**/
353,
/**/
352,
/**/
351,
/**/
350,
/**/
349,
/**/
348,
/**/
347,
/**/
346,
/**/
345,
/**/
344,
/**/
343,
/**/
342,
/**/
341,
/**/
340,
/**/
339,
/**/
338,
/**/
337,
/**/
336,
/**/
335,
/**/
334,
/**/
333,
/**/
332,
/**/
331,
/**/
330,
/**/
329,
/**/
328,
/**/
327,
/**/
326,
/**/
325,
/**/
324,
/**/
323,
/**/
322,
/**/
321,
/**/
320,
/**/
319,
/**/
318,
/**/
317,
/**/
316,
/**/
315,
/**/
314,
/**/
313,
/**/
312,
/**/
311,
/**/
310,
/**/
309,
/**/
308,
/**/
307,
/**/
306,
/**/
305,
/**/
304,
/**/
303,
/**/
302,
/**/
301,
/**/
300,
/**/
299,
/**/
298,
/**/
297,
/**/
296,
/**/
295,
/**/
294,
/**/
293,
/**/
292,
/**/
291,
/**/
290,
/**/
289,
/**/
288,
/**/
287,
/**/
286,
/**/
285,
/**/
284,
/**/
283,
/**/
282,
/**/
281,
/**/
280,
/**/
279,
/**/
278,
/**/
277,
/**/
276,
/**/
275,
/**/
274,
/**/
273,
/**/
272,
/**/
271,
/**/
270,
/**/
269,
/**/
268,
/**/
267,
/**/
266,
/**/
265,
/**/
264,
/**/
263,
/**/
262,
/**/
261,
/**/
260,
/**/
259,
/**/
258,
/**/
257,
/**/
256,
/**/
255,
/**/
254,
/**/
253,
/**/
252,
/**/
251,
/**/
250,
/**/
249,
/**/
248,
/**/
247,
/**/
246,
/**/
245,
/**/
244,
/**/
243,
/**/
242,
/**/
241,
/**/
240,
/**/
239,
/**/
238,
/**/
237,
/**/
236,
/**/
235,
/**/
234,
/**/
233,
/**/
232,
/**/
231,
/**/
230,
/**/
229,
/**/
228,
/**/
227,
/**/
226,
/**/
225,
/**/
224,
/**/
223,
/**/
222,
/**/
221,
/**/
220,
/**/
219,
/**/
218,
/**/
217,
/**/
216,
/**/
215,
/**/
214,
/**/
213,
/**/
212,
/**/
211,
/**/
210,
/**/
209,
/**/
208,
/**/
207,
/**/
206,
/**/
205,
/**/
204,
/**/
203,
/**/
202,
/**/
201,
/**/
200,
/**/
199,
/**/
198,
/**/
197,
/**/
196,
/**/
195,
/**/
194,
/**/
193,
/**/
192,
/**/
191,
/**/
190,
/**/
189,
/**/
188,
/**/
187,
/**/
186,
/**/
185,
/**/
184,
/**/
183,
/**/
182,
/**/
181,
/**/
180,
/**/
179,
/**/
178,
/**/
177,
/**/
176,
/**/
175,
/**/
174,
/**/
173,
/**/
172,
/**/
171,
/**/
170,
/**/
169,
/**/
168,
/**/
167,
/**/
166,
/**/
165,
/**/
164,
/**/
163,
/**/
162,
/**/
161,
/**/
160,
/**/
159,
/**/
158,
/**/
157,
/**/
156,
/**/
155,
/**/
154,
/**/
153,
/**/
152,
/**/
151,
/**/
150,
/**/
149,
/**/
148,
/**/
147,
/**/
146,
/**/
145,
/**/
144,
/**/
143,
/**/
142,
/**/
141,
/**/
140,
/**/
139,
/**/
138,
/**/
137,
/**/
136,
/**/
135,
/**/
134,
/**/
133,
/**/
132,
/**/
131,
/**/
130,
/**/
129,
/**/
128,
/**/
127,
/**/
126,
/**/
125,
/**/
124,
/**/
123,
/**/
122,
/**/
121,
/**/
120,
/**/
119,
/**/
118,
/**/
117,
/**/
116,
/**/
115,
/**/
114,
/**/
113,
/**/
112,
/**/
111,
/**/
110,
/**/
109,
/**/
108,
/**/
107,
/**/
106,
/**/
105,
/**/
104,
/**/
103,
/**/
102,
/**/
101,
/**/
100,
/**/
99,
/**/
98,
/**/
97,
/**/
96,
/**/
95,
/**/
94,
/**/
93,
/**/
92,
/**/
91,
/**/
90,
/**/
89,
/**/
88,
/**/
87,
/**/
86,
/**/
85,
/**/
84,
/**/
83,
/**/
82,
/**/
81,
/**/
80,
/**/
79,
/**/
78,
/**/
77,
/**/
76,
/**/
75,
/**/
74,
/**/
73,
/**/
72,
/**/
71,
/**/
70,
/**/
69,
/**/
68,
/**/
67,
/**/
66,
/**/
65,
/**/
64,
/**/
63,
/**/
62,
/**/
61,
/**/
60,
/**/
59,
/**/
58,
/**/
57,
/**/
56,
/**/
55,
/**/
54,
/**/
53,
/**/
52,
/**/
51,
/**/
50,
/**/
49,
/**/
48,
/**/
47,
/**/
46,
/**/
45,
/**/
44,
/**/
43,
/**/
42,
/**/
41,
/**/
40,
/**/
39,
/**/
38,
/**/
37,
/**/
36,
/**/
35,
/**/
34,
/**/
33,
/**/
32,
/**/
31,
/**/
30,
/**/
29,
/**/
28,
/**/
27,
/**/
26,
/**/
25,
/**/
24,
/**/
23,
/**/
22,
/**/
21,
/**/
20,
/**/
19,
/**/
18,
/**/
17,
/**/
16,
/**/
15,
/**/
14,
/**/
13,
/**/
12,
/**/
11,
/**/
10,
/**/
9,
/**/
8,
/**/
7,
/**/
6,
/**/
5,
/**/
4,
/**/
3,
/**/
2,
/**/
1,
/**/
0
};
/*
 * Place to put a short description when adding a feature with a patch.
 * Keep it short, e.g.,: "relative numbers", "persistent undo".
 * Also add a comment marker to separate the lines.
 * See the official Vim patches for the diff format: It must use a context of
 * one line only. Create it by hand or use "diff -C2" and edit the patch.
 */
/* NULL-terminated list of site-local patch descriptions; printed by
 * list_version() after the numbered patches. */
static char *(extra_patches[]) =
{   /* Add your patch description below this line */
/**/
    NULL
};
/*
 * Return the highest patch number that is included, or zero when the
 * included_patches[] table is empty.
 */
    int
highest_patch(void)
{
    int		idx;
    int		maxnr = 0;

    /* The table is zero-terminated; scan it for the largest entry. */
    for (idx = 0; included_patches[idx] != 0; ++idx)
	if (maxnr < included_patches[idx])
	    maxnr = included_patches[idx];
    return maxnr;
}
#if defined(FEAT_EVAL) || defined(PROTO)
/*
 * Return TRUE if patch "n" has been included, FALSE otherwise.
 */
    int
has_patch(int n)
{
    int		idx;

    /* Linear scan of the zero-terminated patch table. */
    for (idx = 0; included_patches[idx] != 0; ++idx)
	if (included_patches[idx] == n)
	    return TRUE;
    return FALSE;
}
#endif
/*
 * ":version" command: list version info.
 */
    void
ex_version(exarg_T *eap)
{
    /* A ":version 9.99" command (with an argument) is silently ignored. */
    if (*eap->arg != NUL)
	return;
    msg_putchar('\n');
    list_version();
}
/*
 * Output a string for the version message.  If it's going to wrap, output a
 * newline, unless the message is too long to fit on the screen anyway.
 * When "wrap" is TRUE wrap the string in [].
 */
    static void
version_msg_wrap(char_u *s, int wrap)
{
    int		cells = (int)vim_strsize(s);

    if (wrap)
	cells += 2;	/* room for the enclosing "[" and "]" */

    /* Start a fresh line when the text would wrap mid-string, unless it is
     * wider than the screen anyway or supplies its own newline. */
    if (!got_int && cells < (int)Columns && msg_col + cells >= (int)Columns
						       && *s != '\n')
	msg_putchar('\n');
    if (got_int)
	return;
    if (wrap)
	MSG_PUTS("[");
    MSG_PUTS(s);
    if (wrap)
	MSG_PUTS("]");
}
/*
 * Output a version message line without the [] wrapping.
 */
    static void
version_msg(char *s)
{
    version_msg_wrap((char_u *)s, FALSE);
}
/*
 * List all features aligned in columns, dictionary style.
 */
    static void
list_features(void)
{
    /* -1, -1: features[] is NULL-terminated and no entry is "current". */
    list_in_columns((char_u **)features, -1, -1);
}
/*
 * List string items nicely aligned in columns.
 * When "size" is < 0 then the last entry is marked with NULL.
 * The entry with index "current" is inclosed in [].
 */
    void
list_in_columns(char_u **items, int size, int current)
{
    int		i;
    int		ncol;		/* number of columns on screen */
    int		nrow;		/* number of rows needed */
    int		item_count = 0;
    int		width = 0;	/* column width in screen cells */

    /* Find the length of the longest item, use that + 1 as the column
     * width. */
    for (i = 0; size < 0 ? items[i] != NULL : i < size; ++i)
    {
	/* the "current" item needs two extra cells for the [] marks */
	int l = (int)vim_strsize(items[i]) + (i == current ? 2 : 0);

	if (l > width)
	    width = l;
	++item_count;
    }
    width += 1;

    if (Columns < width)
    {
	/* Not enough screen columns - show one per line */
	for (i = 0; i < item_count; ++i)
	{
	    version_msg_wrap(items[i], i == current);
	    if (msg_col > 0)
		msg_putchar('\n');
	}
	return;
    }

    /* The rightmost column doesn't need a separator.
     * Sacrifice it to fit in one more column if possible. */
    ncol = (int) (Columns + 1) / width;
    nrow = item_count / ncol + (item_count % ncol ? 1 : 0);

    /* Output in column-major order: i counts columns then rows, idx counts
     * rows then columns. */
    for (i = 0; !got_int && i < nrow * ncol; ++i)
    {
	int idx = (i / ncol) + (i % ncol) * nrow;

	if (idx < item_count)
	{
	    int last_col = (i + 1) % ncol == 0;

	    if (idx == current)
		msg_putchar('[');
	    msg_puts(items[idx]);
	    if (idx == current)
		msg_putchar(']');
	    if (last_col)
	    {
		/* at the end of a screen row: break the line */
		if (msg_col > 0)
		    msg_putchar('\n');
	    }
	    else
	    {
		/* pad with spaces up to the next column boundary */
		while (msg_col % width)
		    msg_putchar(' ');
	    }
	}
	else
	{
	    /* past the last item: finish the (partial) last row */
	    if (msg_col > 0)
		msg_putchar('\n');
	}
    }
}
/*
 * Print the full version information: version string, platform, included
 * patches, compilation info, feature list and the locations of the various
 * startup files.  Used for ":version".
 */
    void
list_version(void)
{
    int		i;
    int		first;
    char	*s = "";

    /*
     * When adding features here, don't forget to update the list of
     * internal variables in eval.c!
     */
    init_longVersion();
    MSG(longVersion);
#ifdef WIN3264
# ifdef FEAT_GUI_W32
#  ifdef _WIN64
    MSG_PUTS(_("\nMS-Windows 64-bit GUI version"));
#  else
    MSG_PUTS(_("\nMS-Windows 32-bit GUI version"));
#  endif
#  ifdef FEAT_OLE
    MSG_PUTS(_(" with OLE support"));
#  endif
# else
#  ifdef _WIN64
    MSG_PUTS(_("\nMS-Windows 64-bit console version"));
#  else
    MSG_PUTS(_("\nMS-Windows 32-bit console version"));
#  endif
# endif
#endif
#if defined(MACOS_X)
# if defined(MACOS_X_DARWIN)
    MSG_PUTS(_("\nmacOS version"));
# else
    MSG_PUTS(_("\nmacOS version w/o darwin feat."));
# endif
#endif
#ifdef VMS
    MSG_PUTS(_("\nOpenVMS version"));
# ifdef HAVE_PATHDEF
    if (*compiled_arch != NUL)
    {
	MSG_PUTS(" - ");
	MSG_PUTS(compiled_arch);
    }
# endif
#endif

    /* Print the list of patch numbers if there is at least one. */
    /* Print a range when patches are consecutive: "1-10, 12, 15-40, 42-45" */
    if (included_patches[0] != 0)
    {
	MSG_PUTS(_("\nIncluded patches: "));
	first = -1;
	/* find last one */
	for (i = 0; included_patches[i] != 0; ++i)
	    ;
	/* The table is in descending order; walk it backwards so the output
	 * is ascending, collapsing consecutive numbers into "low-high". */
	while (--i >= 0)
	{
	    if (first < 0)
		first = included_patches[i];
	    if (i == 0 || included_patches[i - 1] != included_patches[i] + 1)
	    {
		MSG_PUTS(s);
		s = ", ";
		msg_outnum((long)first);
		if (first != included_patches[i])
		{
		    MSG_PUTS("-");
		    msg_outnum((long)included_patches[i]);
		}
		first = -1;
	    }
	}
    }

    /* Print the list of extra patch descriptions if there is at least one. */
    if (extra_patches[0] != NULL)
    {
	MSG_PUTS(_("\nExtra patches: "));
	s = "";
	for (i = 0; extra_patches[i] != NULL; ++i)
	{
	    MSG_PUTS(s);
	    s = ", ";
	    MSG_PUTS(extra_patches[i]);
	}
    }

#ifdef MODIFIED_BY
    MSG_PUTS("\n");
    MSG_PUTS(_("Modified by "));
    MSG_PUTS(MODIFIED_BY);
#endif
#ifdef HAVE_PATHDEF
    /* Who compiled it and on what system (from pathdef.c). */
    if (*compiled_user != NUL || *compiled_sys != NUL)
    {
	MSG_PUTS(_("\nCompiled "));
	if (*compiled_user != NUL)
	{
	    MSG_PUTS(_("by "));
	    MSG_PUTS(compiled_user);
	}
	if (*compiled_sys != NUL)
	{
	    MSG_PUTS("@");
	    MSG_PUTS(compiled_sys);
	}
    }
#endif
    /* The feature size this binary was built with. */
#ifdef FEAT_HUGE
    MSG_PUTS(_("\nHuge version "));
#else
# ifdef FEAT_BIG
    MSG_PUTS(_("\nBig version "));
# else
#  ifdef FEAT_NORMAL
    MSG_PUTS(_("\nNormal version "));
#  else
#   ifdef FEAT_SMALL
    MSG_PUTS(_("\nSmall version "));
#   else
    MSG_PUTS(_("\nTiny version "));
#   endif
#  endif
# endif
#endif
    /* Which GUI (if any) was compiled in. */
#ifndef FEAT_GUI
    MSG_PUTS(_("without GUI."));
#else
# ifdef FEAT_GUI_GTK
#  ifdef USE_GTK3
    MSG_PUTS(_("with GTK3 GUI."));
#  else
#   ifdef FEAT_GUI_GNOME
    MSG_PUTS(_("with GTK2-GNOME GUI."));
#   else
    MSG_PUTS(_("with GTK2 GUI."));
#   endif
#  endif
# else
#  ifdef FEAT_GUI_MOTIF
    MSG_PUTS(_("with X11-Motif GUI."));
#  else
#   ifdef FEAT_GUI_ATHENA
#    ifdef FEAT_GUI_NEXTAW
    MSG_PUTS(_("with X11-neXtaw GUI."));
#    else
    MSG_PUTS(_("with X11-Athena GUI."));
#    endif
#   else
#    ifdef FEAT_GUI_PHOTON
    MSG_PUTS(_("with Photon GUI."));
#    else
#     if defined(MSWIN)
    MSG_PUTS(_("with GUI."));
#     else
#      if defined(TARGET_API_MAC_CARBON) && TARGET_API_MAC_CARBON
    MSG_PUTS(_("with Carbon GUI."));
#      else
#       if defined(TARGET_API_MAC_OSX) && TARGET_API_MAC_OSX
    MSG_PUTS(_("with Cocoa GUI."));
#       else
#       endif
#      endif
#     endif
#    endif
#   endif
#  endif
# endif
#endif
    version_msg(_(" Features included (+) or not (-):\n"));

    list_features();
    /* Show where the various configuration files are looked for. */
#ifdef SYS_VIMRC_FILE
    version_msg(_(" system vimrc file: \""));
    version_msg(SYS_VIMRC_FILE);
    version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE
    version_msg(_(" user vimrc file: \""));
    version_msg(USR_VIMRC_FILE);
    version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE2
    version_msg(_(" 2nd user vimrc file: \""));
    version_msg(USR_VIMRC_FILE2);
    version_msg("\"\n");
#endif
#ifdef USR_VIMRC_FILE3
    version_msg(_(" 3rd user vimrc file: \""));
    version_msg(USR_VIMRC_FILE3);
    version_msg("\"\n");
#endif
#ifdef USR_EXRC_FILE
    version_msg(_(" user exrc file: \""));
    version_msg(USR_EXRC_FILE);
    version_msg("\"\n");
#endif
#ifdef USR_EXRC_FILE2
    version_msg(_(" 2nd user exrc file: \""));
    version_msg(USR_EXRC_FILE2);
    version_msg("\"\n");
#endif
#ifdef FEAT_GUI
# ifdef SYS_GVIMRC_FILE
    version_msg(_(" system gvimrc file: \""));
    version_msg(SYS_GVIMRC_FILE);
    version_msg("\"\n");
# endif
    version_msg(_(" user gvimrc file: \""));
    version_msg(USR_GVIMRC_FILE);
    version_msg("\"\n");
# ifdef USR_GVIMRC_FILE2
    version_msg(_("2nd user gvimrc file: \""));
    version_msg(USR_GVIMRC_FILE2);
    version_msg("\"\n");
# endif
# ifdef USR_GVIMRC_FILE3
    version_msg(_("3rd user gvimrc file: \""));
    version_msg(USR_GVIMRC_FILE3);
    version_msg("\"\n");
# endif
#endif
    version_msg(_(" defaults file: \""));
    version_msg(VIM_DEFAULTS_FILE);
    version_msg("\"\n");
#ifdef FEAT_GUI
# ifdef SYS_MENU_FILE
    version_msg(_(" system menu file: \""));
    version_msg(SYS_MENU_FILE);
    version_msg("\"\n");
# endif
#endif
#ifdef HAVE_PATHDEF
    if (*default_vim_dir != NUL)
    {
	version_msg(_(" fall-back for $VIM: \""));
	version_msg((char *)default_vim_dir);
	version_msg("\"\n");
    }
    if (*default_vimruntime_dir != NUL)
    {
	version_msg(_(" f-b for $VIMRUNTIME: \""));
	version_msg((char *)default_vimruntime_dir);
	version_msg("\"\n");
    }
    /* Compiler and linker flags used for this build (from pathdef.c). */
    version_msg(_("Compilation: "));
    version_msg((char *)all_cflags);
    version_msg("\n");
#ifdef VMS
    if (*compiler_version != NUL)
    {
	version_msg(_("Compiler: "));
	version_msg((char *)compiler_version);
	version_msg("\n");
    }
#endif
    version_msg(_("Linking: "));
    version_msg((char *)all_lflags);
#endif
#ifdef DEBUG
    version_msg("\n");
    version_msg(_(" DEBUG BUILD"));
#endif
}
static void do_intro_line(int row, char_u *mesg, int add_version, int attr);
/*
 * Show the intro message, but only when not editing a file.
 */
    void
maybe_intro_message(void)
{
    /* Suppress the greeting when there is text in the buffer, a file name,
     * a split window, or 'shortmess' contains the intro flag.  The checks
     * are done in the same order as the original condition. */
    if (!BUFEMPTY())
	return;
    if (curbuf->b_fname != NULL)
	return;
    if (firstwin->w_next != NULL)
	return;
    if (vim_strchr(p_shm, SHM_INTRO) != NULL)
	return;
    intro_message(FALSE);
}
/*
 * Give an introductory message about Vim.
 * Only used when starting Vim on an empty file, without a file name.
 * Or with the ":intro" command (for Sven :-).
 */
    void
intro_message(
    int		colon)		/* TRUE for ":intro" */
{
    int		i;
    int		row;
    int		blanklines;
    int		sponsor;
    char	*p;
    /* The intro text.  NOTE: gui_lines[] below must stay parallel to this
     * array: a non-NULL entry there replaces the same-index line in GUI
     * insertmode.  Entry index 2 ("version ") gets the number appended. */
    static char	*(lines[]) =
    {
	N_("VIM - Vi IMproved"),
	"",
	N_("version "),
	N_("by Bram Moolenaar et al."),
#ifdef MODIFIED_BY
	" ",
#endif
	N_("Vim is open source and freely distributable"),
	"",
	N_("Help poor children in Uganda!"),
	N_("type :help iccf<Enter> for information "),
	"",
	N_("type :q<Enter> to exit "),
	N_("type :help<Enter> or <F1> for on-line help"),
	N_("type :help version8<Enter> for version info"),
	NULL,
	"",
	N_("Running in Vi compatible mode"),
	N_("type :set nocp<Enter> for Vim defaults"),
	N_("type :help cp-default<Enter> for info on this"),
    };
#ifdef FEAT_GUI
    /* Replacement lines used in the GUI with 'insertmode' set; NULL means
     * keep the corresponding entry of lines[]. */
    static char	*(gui_lines[]) =
    {
	NULL,
	NULL,
	NULL,
	NULL,
#ifdef MODIFIED_BY
	NULL,
#endif
	NULL,
	NULL,
	NULL,
	N_("menu Help->Orphans for information "),
	NULL,
	N_("Running modeless, typed text is inserted"),
	N_("menu Edit->Global Settings->Toggle Insert Mode "),
	N_(" for two modes "),
	NULL,
	NULL,
	NULL,
	N_("menu Edit->Global Settings->Toggle Vi Compatible"),
	N_(" for Vim defaults "),
    };
#endif

    /* blanklines = screen height - # message lines */
    blanklines = (int)Rows - ((sizeof(lines) / sizeof(char *)) - 1);
    if (!p_cp)
	blanklines += 4;  /* add 4 for not showing "Vi compatible" message */

    /* Don't overwrite a statusline.  Depends on 'cmdheight'. */
    if (p_ls > 1)
	blanklines -= Rows - topframe->fr_height;
    if (blanklines < 0)
	blanklines = 0;

    /* Show the sponsor and register message one out of four times, the Uganda
     * message two out of four times. */
    sponsor = (int)time(NULL);
    sponsor = ((sponsor & 2) == 0) - ((sponsor & 4) == 0);

    /* start displaying the message lines after half of the blank lines */
    row = blanklines / 2;
    /* Only draw when there is room; ":intro" always draws. */
    if ((row >= 2 && Columns >= 50) || colon)
    {
	for (i = 0; i < (int)(sizeof(lines) / sizeof(char *)); ++i)
	{
	    p = lines[i];
#ifdef FEAT_GUI
	    if (p_im && gui.in_use && gui_lines[i] != NULL)
		p = gui_lines[i];
#endif
	    if (p == NULL)
	    {
		/* NULL marks the start of the "Vi compatible" lines: skip
		 * them unless 'compatible' is set. */
		if (!p_cp)
		    break;
		continue;
	    }
	    if (sponsor != 0)
	    {
		/* Substitute the sponsor/register variants for the Uganda
		 * lines, matched by a marker substring in the text. */
		if (strstr(p, "children") != NULL)
		    p = sponsor < 0
			? N_("Sponsor Vim development!")
			: N_("Become a registered Vim user!");
		else if (strstr(p, "iccf") != NULL)
		    p = sponsor < 0
			? N_("type :help sponsor<Enter> for information ")
			: N_("type :help register<Enter> for information ");
		else if (strstr(p, "Orphans") != NULL)
		    p = N_("menu Help->Sponsor/Register for information ");
	    }
	    if (*p != NUL)
		do_intro_line(row, (char_u *)_(p), i == 2, 0);
	    ++row;
	}
    }

    /* Make the wait-return message appear just below the text. */
    if (colon)
	msg_row = row;
}
/*
 * Display one centered line of the intro message.
 * "row":	 screen row to draw on.
 * "mesg":	 the text; substrings in <> are highlighted separately.
 * "add_version": when TRUE append the version number (for the "version "
 *		 line).
 * "attr":	 attribute for the normal text parts.
 */
    static void
do_intro_line(
    int		row,
    char_u	*mesg,
    int		add_version,
    int		attr)
{
    char_u	vers[20];
    int		col;
    char_u	*p;
    int		l;
    int		clen;

#ifdef MODIFIED_BY
# define MODBY_LEN 150
    char_u	modby[MODBY_LEN];

    /* A line consisting of a single blank is the placeholder for the
     * "Modified by ..." message. */
    if (*mesg == ' ')
    {
	vim_strncpy(modby, (char_u *)_("Modified by "), MODBY_LEN - 1);
	l = (int)STRLEN(modby);
	vim_strncpy(modby + l, (char_u *)MODIFIED_BY, MODBY_LEN - l - 1);
	mesg = modby;
    }
#endif

    /* Center the message horizontally. */
    col = vim_strsize(mesg);
    if (add_version)
    {
	STRCPY(vers, mediumVersion);
	if (highest_patch())
	{
	    /* Check for 9.9x or 9.9xx, alpha/beta version */
	    /* NOTE(review): mediumVersion is ASCII, so passing its chars to
	     * isalpha() is safe here; for arbitrary bytes an (unsigned char)
	     * cast would be needed. */
	    if (isalpha((int)vers[3]))
	    {
		int len = (isalpha((int)vers[4])) ? 5 : 4;

		sprintf((char *)vers + len, ".%d%s", highest_patch(),
							  mediumVersion + len);
	    }
	    else
		sprintf((char *)vers + 3, ".%d", highest_patch());
	}
	col += (int)STRLEN(vers);
    }
    col = (Columns - col) / 2;
    if (col < 0)
	col = 0;

    /* Split up in parts to highlight <> items differently. */
    for (p = mesg; *p != NUL; p += l)
    {
	clen = 0;
	/* Advance l to the end of this part: stop before a '<' or right
	 * after a '>', accumulating the display width in clen. */
	for (l = 0; p[l] != NUL
		      && (l == 0 || (p[l] != '<' && p[l - 1] != '>')); ++l)
	{
#ifdef FEAT_MBYTE
	    if (has_mbyte)
	    {
		clen += ptr2cells(p + l);
		l += (*mb_ptr2len)(p + l) - 1;
	    }
	    else
#endif
		clen += byte2cells(p[l]);
	}
	screen_puts_len(p, l, row, col, *p == '<' ? HL_ATTR(HLF_8) : attr);
	col += clen;
    }

    /* Add the version number to the version line. */
    if (add_version)
	screen_puts(vers, row, col, 0);
}
/*
 * ":intro": clear screen, display intro screen and wait for return.
 */
void
ex_intro(exarg_T *eap UNUSED)
{
screenclear(); /* start from an empty screen */
intro_message(TRUE); /* TRUE: invoked with ":intro" (relaxes the room check) */
wait_return(TRUE); /* wait for the user to press <Enter> */
}
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include "gd_intern.h"
/* 2.03: don't include zlib here or we can't build without PNG */
#include "gd.h"
#include "gdhelpers.h"
#include "gd_color.h"
#include "gd_errors.h"
/* 2.0.12: this now checks the clipping rectangle */
#define gdImageBoundsSafeMacro(im, x, y) (!((((y) < (im)->cy1) || ((y) > (im)->cy2)) || (((x) < (im)->cx1) || ((x) > (im)->cx2))))
#ifdef _OSD_POSIX /* BS2000 uses the EBCDIC char set instead of ASCII */
#define CHARSET_EBCDIC
#define __attribute__(any) /*nothing */
#endif
/*_OSD_POSIX*/
#ifndef CHARSET_EBCDIC
#define ASC(ch) ch
#else /*CHARSET_EBCDIC */
#define ASC(ch) gd_toascii[(unsigned char)ch]
static const unsigned char gd_toascii[256] = {
/*00 */ 0x00, 0x01, 0x02, 0x03, 0x85, 0x09, 0x86, 0x7f,
0x87, 0x8d, 0x8e, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /*................ */
/*10 */ 0x10, 0x11, 0x12, 0x13, 0x8f, 0x0a, 0x08, 0x97,
0x18, 0x19, 0x9c, 0x9d, 0x1c, 0x1d, 0x1e, 0x1f, /*................ */
/*20 */ 0x80, 0x81, 0x82, 0x83, 0x84, 0x92, 0x17, 0x1b,
0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x05, 0x06, 0x07, /*................ */
/*30 */ 0x90, 0x91, 0x16, 0x93, 0x94, 0x95, 0x96, 0x04,
0x98, 0x99, 0x9a, 0x9b, 0x14, 0x15, 0x9e, 0x1a, /*................ */
/*40 */ 0x20, 0xa0, 0xe2, 0xe4, 0xe0, 0xe1, 0xe3, 0xe5,
0xe7, 0xf1, 0x60, 0x2e, 0x3c, 0x28, 0x2b, 0x7c, /* .........`.<(+| */
/*50 */ 0x26, 0xe9, 0xea, 0xeb, 0xe8, 0xed, 0xee, 0xef,
0xec, 0xdf, 0x21, 0x24, 0x2a, 0x29, 0x3b, 0x9f, /*&.........!$*);. */
/*60 */ 0x2d, 0x2f, 0xc2, 0xc4, 0xc0, 0xc1, 0xc3, 0xc5,
0xc7, 0xd1, 0x5e, 0x2c, 0x25, 0x5f, 0x3e, 0x3f,
/*-/........^,%_>?*/
/*70 */ 0xf8, 0xc9, 0xca, 0xcb, 0xc8, 0xcd, 0xce, 0xcf,
0xcc, 0xa8, 0x3a, 0x23, 0x40, 0x27, 0x3d, 0x22, /*..........:#@'=" */
/*80 */ 0xd8, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
0x68, 0x69, 0xab, 0xbb, 0xf0, 0xfd, 0xfe, 0xb1, /*.abcdefghi...... */
/*90 */ 0xb0, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70,
0x71, 0x72, 0xaa, 0xba, 0xe6, 0xb8, 0xc6, 0xa4, /*.jklmnopqr...... */
/*a0 */ 0xb5, 0xaf, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
0x79, 0x7a, 0xa1, 0xbf, 0xd0, 0xdd, 0xde, 0xae, /*..stuvwxyz...... */
/*b0 */ 0xa2, 0xa3, 0xa5, 0xb7, 0xa9, 0xa7, 0xb6, 0xbc,
0xbd, 0xbe, 0xac, 0x5b, 0x5c, 0x5d, 0xb4, 0xd7, /*...........[\].. */
/*c0 */ 0xf9, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
0x48, 0x49, 0xad, 0xf4, 0xf6, 0xf2, 0xf3, 0xf5, /*.ABCDEFGHI...... */
/*d0 */ 0xa6, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50,
0x51, 0x52, 0xb9, 0xfb, 0xfc, 0xdb, 0xfa, 0xff, /*.JKLMNOPQR...... */
/*e0 */ 0xd9, 0xf7, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
0x59, 0x5a, 0xb2, 0xd4, 0xd6, 0xd2, 0xd3, 0xd5, /*..STUVWXYZ...... */
/*f0 */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
0x38, 0x39, 0xb3, 0x7b, 0xdc, 0x7d, 0xda, 0x7e /*0123456789.{.}.~ */
};
#endif /*CHARSET_EBCDIC */
extern const int gdCosT[];
extern const int gdSinT[];
/**
* Group: Error Handling
*/
/*
 * Default gd error handler: write a severity label followed by the
 * formatted message to stderr, then flush immediately so the text is
 * not lost if the process aborts shortly after.  Priorities outside
 * the known set get no label.
 */
void gd_stderr_error(int priority, const char *format, va_list args)
{
	const char *label = NULL;

	switch (priority) {
	case GD_ERROR:
		label = "GD Error: ";
		break;
	case GD_WARNING:
		label = "GD Warning: ";
		break;
	case GD_NOTICE:
		label = "GD Notice: ";
		break;
	case GD_INFO:
		label = "GD Info: ";
		break;
	case GD_DEBUG:
		label = "GD Debug: ";
		break;
	default:
		break;
	}
	if (label != NULL) {
		fputs(label, stderr);
	}
	vfprintf(stderr, format, args);
	fflush(stderr);
}
static gdErrorMethod gd_error_method = gd_stderr_error;
/* Route a message to the installed handler; silently drop it when the
 * handler has been cleared to NULL. */
static void _gd_error_ex(int priority, const char *format, va_list args)
{
	if (!gd_error_method) {
		return;
	}
	gd_error_method(priority, format, args);
}
/* Report a printf-style message at GD_WARNING priority through the
 * currently installed error handler. */
void gd_error(const char *format, ...)
{
va_list args;
va_start(args, format);
_gd_error_ex(GD_WARNING, format, args);
va_end(args);
}
/* Report a printf-style message at the given "priority" (GD_ERROR,
 * GD_WARNING, ...) through the currently installed error handler. */
void gd_error_ex(int priority, const char *format, ...)
{
va_list args;
va_start(args, format);
_gd_error_ex(priority, format, args);
va_end(args);
}
/*
Function: gdSetErrorMethod
Install "error_method" as the handler used by gd_error()/gd_error_ex().
Passing NULL disables reporting (messages are dropped by _gd_error_ex).
*/
BGD_DECLARE(void) gdSetErrorMethod(gdErrorMethod error_method)
{
gd_error_method = error_method;
}
/*
Function: gdClearErrorMethod
Restore the default stderr-based error handler (gd_stderr_error).
*/
BGD_DECLARE(void) gdClearErrorMethod(void)
{
gd_error_method = gd_stderr_error;
}
static void gdImageBrushApply (gdImagePtr im, int x, int y);
static void gdImageTileApply (gdImagePtr im, int x, int y);
BGD_DECLARE(int) gdImageGetTrueColorPixel (gdImagePtr im, int x, int y);
/**
* Group: Creation and Destruction
*/
/*
Function: gdImageCreate
gdImageCreate is called to create palette-based images, with no
more than 256 colors. The image must eventually be destroyed using
gdImageDestroy().
Parameters:
sx - The image width.
sy - The image height.
Returns:
A pointer to the new image or NULL if an error occurred.
Example:
(start code)
gdImagePtr im;
im = gdImageCreate(64, 64);
// ... Use the image ...
gdImageDestroy(im);
(end code)
See Also:
<gdImageCreateTrueColor>
*/
BGD_DECLARE(gdImagePtr) gdImageCreate (int sx, int sy)
{
	gdImagePtr im;
	int row;

	/* Reject dimension combinations whose backing allocations would
	 * overflow a size_t. */
	if (overflow2(sx, sy)) {
		return NULL;
	}
	if (overflow2(sizeof (unsigned char *), sy)) {
		return NULL;
	}
	if (overflow2(sizeof (unsigned char), sx)) {
		return NULL;
	}

	/* Zero-initialized image header. */
	im = (gdImage *) gdCalloc(1, sizeof(gdImage));
	if (!im) {
		return NULL;
	}

	/* One scanline pointer per row; row-major ever since gd 1.3. */
	im->pixels = (unsigned char **) gdMalloc (sizeof (unsigned char *) * sy);
	if (!im->pixels) {
		gdFree(im);
		return NULL;
	}

	im->polyInts = 0;
	im->polyAllocated = 0;
	im->brush = 0;
	im->tile = 0;
	im->style = 0;

	for (row = 0; row < sy; row++) {
		im->pixels[row] = (unsigned char *) gdCalloc (sx, sizeof (unsigned char));
		if (!im->pixels[row]) {
			/* Roll back every scanline allocated so far. */
			while (--row >= 0) {
				gdFree(im->pixels[row]);
			}
			gdFree(im->pixels);
			gdFree(im);
			return NULL;
		}
	}

	im->sx = sx;
	im->sy = sy;
	im->colorsTotal = 0;
	im->transparent = (-1);
	im->interlace = 0;
	im->thick = 1;
	im->AA = 0;
	/* All palette slots start out free. */
	for (row = 0; row < gdMaxColors; row++) {
		im->open[row] = 1;
	}
	im->trueColor = 0;
	im->tpixels = 0;
	im->cx1 = 0;
	im->cy1 = 0;
	im->cx2 = im->sx - 1;
	im->cy2 = im->sy - 1;
	im->res_x = GD_RESOLUTION;
	im->res_y = GD_RESOLUTION;
	im->interpolation = NULL;
	im->interpolation_id = GD_BILINEAR_FIXED;
	return im;
}
/*
Function: gdImageCreateTrueColor
<gdImageCreateTrueColor> is called to create truecolor images,
with an essentially unlimited number of colors. Invoke
<gdImageCreateTrueColor> with the x and y dimensions of the
desired image. <gdImageCreateTrueColor> returns a <gdImagePtr>
to the new image, or NULL if unable to allocate the image. The
image must eventually be destroyed using <gdImageDestroy>().
Truecolor images are always filled with black at creation
time. There is no concept of a "background" color index.
Parameters:
sx - The image width.
sy - The image height.
Returns:
A pointer to the new image or NULL if an error occurred.
Example:
(start code)
gdImagePtr im;
im = gdImageCreateTrueColor(64, 64);
// ... Use the image ...
gdImageDestroy(im);
(end code)
See Also:
<gdImageCreateTrueColor>
*/
BGD_DECLARE(gdImagePtr) gdImageCreateTrueColor (int sx, int sy)
{
	gdImagePtr im;
	int row;

	/* Reject dimension combinations whose backing allocations would
	 * overflow a size_t. */
	if (overflow2(sx, sy)) {
		return NULL;
	}
	if (overflow2(sizeof (int *), sy)) {
		return 0;
	}
	if (overflow2(sizeof(int), sx)) {
		return NULL;
	}

	im = (gdImage *) gdMalloc (sizeof (gdImage));
	if (!im) {
		return 0;
	}
	memset (im, 0, sizeof (gdImage));

	/* One int scanline per row. */
	im->tpixels = (int **) gdMalloc (sizeof (int *) * sy);
	if (!im->tpixels) {
		gdFree(im);
		return 0;
	}

	im->polyInts = 0;
	im->polyAllocated = 0;
	im->brush = 0;
	im->tile = 0;
	im->style = 0;

	for (row = 0; row < sy; row++) {
		im->tpixels[row] = (int *) gdCalloc (sx, sizeof (int));
		if (!im->tpixels[row]) {
			/* 2.0.34: roll back every scanline allocated so far. */
			while (--row >= 0) {
				gdFree(im->tpixels[row]);
			}
			gdFree(im->tpixels);
			gdFree(im);
			return 0;
		}
	}

	im->sx = sx;
	im->sy = sy;
	im->transparent = (-1);
	im->interlace = 0;
	im->trueColor = 1;
	/* 2.0.2: alpha blending is on by default and saving of alpha is off
	   by default: font antialiasing works as expected in JPEGs on the
	   first try, and PNGs stay smaller when the alpha channel is not
	   actually wanted. */
	im->saveAlphaFlag = 0;
	im->alphaBlendingFlag = 1;
	im->thick = 1;
	im->AA = 0;
	im->cx1 = 0;
	im->cy1 = 0;
	im->cx2 = im->sx - 1;
	im->cy2 = im->sy - 1;
	im->res_x = GD_RESOLUTION;
	im->res_y = GD_RESOLUTION;
	im->interpolation = NULL;
	im->interpolation_id = GD_BILINEAR_FIXED;
	return im;
}
/*
Function: gdImageDestroy
<gdImageDestroy> is used to free the memory associated with an
image. It is important to invoke <gdImageDestroy> before exiting
your program or assigning a new image to a <gdImagePtr> variable.
Parameters:
im - Pointer to the gdImage to delete.
Returns:
Nothing.
Example:
(start code)
gdImagePtr im;
im = gdImageCreate(10, 10);
// ... Use the image ...
// Now destroy it
gdImageDestroy(im);
(end code)
*/
/*
 * gdImageDestroy: free all memory owned by the image — the pixel rows
 * (palette or truecolor), the polygon scratch buffer, the style array,
 * and the image structure itself.
 *
 * im may be NULL, in which case nothing happens.  This mirrors
 * free(NULL) semantics and protects callers that pass the result of a
 * failed gdImageCreate*() straight to the destructor.
 */
BGD_DECLARE(void) gdImageDestroy (gdImagePtr im)
{
	int i;

	/* NULL-deref hardening: destroying a NULL image is a no-op. */
	if (im == NULL) {
		return;
	}
	if (im->pixels) {
		for (i = 0; (i < im->sy); i++) {
			gdFree (im->pixels[i]);
		}
		gdFree (im->pixels);
	}
	if (im->tpixels) {
		for (i = 0; (i < im->sy); i++) {
			gdFree (im->tpixels[i]);
		}
		gdFree (im->tpixels);
	}
	if (im->polyInts) {
		gdFree (im->polyInts);
	}
	if (im->style) {
		gdFree (im->style);
	}
	gdFree (im);
}
/**
* Group: Color
*/
/**
* Function: gdImageColorClosest
*
* Gets the closest color of the image
*
* This is a simplified variant of <gdImageColorClosestAlpha> where the alpha
* channel is always opaque.
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
*
* Returns:
* The closest color already available in the palette for palette images;
* the color value of the given components for truecolor images.
*
* See also:
* - <gdImageColorExact>
*/
/* Fully-opaque convenience wrapper around the alpha-aware search. */
BGD_DECLARE(int) gdImageColorClosest (gdImagePtr im, int r, int g, int b)
{
return gdImageColorClosestAlpha (im, r, g, b, gdAlphaOpaque);
}
/**
* Function: gdImageColorClosestAlpha
*
* Gets the closest color of the image
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
* a - The value of the alpha component.
*
* Returns:
* The closest color already available in the palette for palette images;
* the color value of the given components for truecolor images.
*
* See also:
* - <gdImageColorExactAlpha>
*/
/*
 * Return the in-use palette entry with the smallest squared RGBA
 * distance to (r, g, b, a), or -1 when the palette is empty.  For a
 * truecolor image the packed color value is returned directly.
 */
BGD_DECLARE(int) gdImageColorClosestAlpha (gdImagePtr im, int r, int g, int b, int a)
{
	int idx;
	int best = -1;
	int have_best = 0;
	long best_dist = 0;

	if (im->trueColor) {
		/* Truecolor encodes the color directly; nothing to search. */
		return gdTrueColorAlpha (r, g, b, a);
	}
	for (idx = 0; idx < im->colorsTotal; idx++) {
		long dr, dg, db, da, dist;
		if (im->open[idx]) {
			continue; /* free slot, not a color */
		}
		dr = (im->red[idx] - r);
		dg = (im->green[idx] - g);
		db = (im->blue[idx] - b);
		da = (im->alpha[idx] - a);
		dist = dr * dr + dg * dg + db * db + da * da;
		if (!have_best || dist < best_dist) {
			best_dist = dist;
			best = idx;
			have_best = 1;
		}
	}
	return best;
}
/* This code is taken from http://www.acm.org/jgt/papers/SmithLyons96/hwb_rgb.html, an article
* on colour conversion to/from RBG and HWB colour systems.
* It has been modified to return the converted value as a * parameter.
*/
#define RETURN_HWB(h, w, b) {HWB->H = h; HWB->W = w; HWB->B = b; return HWB;}
#define RETURN_RGB(r, g, b) {RGB->R = r; RGB->G = g; RGB->B = b; return RGB;}
#define HWB_UNDEFINED -1
#define SETUP_RGB(s, r, g, b) {s.R = r/255.0; s.G = g/255.0; s.B = b/255.0;}
#define MIN(a,b) ((a)<(b)?(a):(b))
#define MIN3(a,b,c) ((a)<(b)?(MIN(a,c)):(MIN(b,c)))
#define MAX(a,b) ((a)<(b)?(b):(a))
#define MAX3(a,b,c) ((a)<(b)?(MAX(b,c)):(MAX(a,c)))
/*
* Theoretically, hue 0 (pure red) is identical to hue 6 in these transforms. Pure
* red always maps to 6 in this implementation. Therefore UNDEFINED can be
* defined as 0 in situations where only unsigned numbers are desired.
*/
typedef struct {
float R, G, B;
}
RGBType;
typedef struct {
float H, W, B;
}
HWBType;
/* Convert an RGB triple (each component in [0, 1]) to HWB, storing the
 * result through "HWB" and returning that pointer (via RETURN_HWB). */
static HWBType *
RGB_to_HWB (RGBType RGB, HWBType * HWB)
{
/*
* RGB are each on [0, 1]. W and B are returned on [0, 1] and H is
* returned on [0, 6]. Exception: H is returned UNDEFINED if W == 1 - B.
*/
float R = RGB.R, G = RGB.G, B = RGB.B, w, v, b, f;
int i;
w = MIN3 (R, G, B);
v = MAX3 (R, G, B);
b = 1 - v;
/* Achromatic (all components equal): hue is undefined. */
if (v == w)
RETURN_HWB (HWB_UNDEFINED, w, b);
/* Pick the hue sextant and the fractional offset within it. */
f = (R == w) ? G - B : ((G == w) ? B - R : R - G);
i = (R == w) ? 3 : ((G == w) ? 5 : 1);
RETURN_HWB (i - f / (v - w), w, b);
}
/* Squared distance between two 0-255 RGB colors, measured in HWB space
 * (hue treated as a circle); used by gdImageColorClosestHWB. */
static float
HWB_Diff (int r1, int g1, int b1, int r2, int g2, int b2)
{
RGBType RGB1, RGB2;
HWBType HWB1, HWB2;
float diff;
SETUP_RGB (RGB1, r1, g1, b1);
SETUP_RGB (RGB2, r2, g2, b2);
RGB_to_HWB (RGB1, &HWB1);
RGB_to_HWB (RGB2, &HWB2);
/*
* I made this bit up; it seems to produce OK results, and it is certainly
* more visually correct than the current RGB metric. (PJW)
*/
if ((HWB1.H == HWB_UNDEFINED) || (HWB2.H == HWB_UNDEFINED)) {
diff = 0; /* Undefined hues always match... */
} else {
diff = fabs (HWB1.H - HWB2.H);
if (diff > 3) {
diff = 6 - diff; /* Remember, it's a colour circle */
}
}
/* Combine squared hue distance with squared W and B differences. */
diff =
diff * diff + (HWB1.W - HWB2.W) * (HWB1.W - HWB2.W) + (HWB1.B -
HWB2.B) * (HWB1.B -
HWB2.B);
return diff;
}
#if 0
/* Dead code: compiled out.  Kept as the documented inverse of
 * RGB_to_HWB for anyone extending the HWB support. */
/*
* This is not actually used, but is here for completeness, in case someone wants to
* use the HWB stuff for anything else...
*/
static RGBType *
HWB_to_RGB (HWBType HWB, RGBType * RGB)
{
/*
* H is given on [0, 6] or UNDEFINED. W and B are given on [0, 1].
* RGB are each returned on [0, 1].
*/
float h = HWB.H, w = HWB.W, b = HWB.B, v, n, f;
int i;
v = 1 - b;
if (h == HWB_UNDEFINED)
RETURN_RGB (v, v, v);
i = floor (h);
f = h - i;
if (i & 1)
f = 1 - f; /* if i is odd */
n = w + f * (v - w); /* linear interpolation between w and v */
switch (i) {
case 6:
case 0:
RETURN_RGB (v, n, w);
case 1:
RETURN_RGB (n, v, w);
case 2:
RETURN_RGB (w, v, n);
case 3:
RETURN_RGB (w, n, v);
case 4:
RETURN_RGB (n, w, v);
case 5:
RETURN_RGB (v, w, n);
}
return RGB;
}
#endif
/*
Function: gdImageColorClosestHWB
*/
/*
 * Like gdImageColorClosest, but measures distance in HWB space (see
 * HWB_Diff), which tracks perceived color better than plain RGB.
 * Returns -1 for an empty palette; for truecolor images the packed
 * value is returned directly.
 */
BGD_DECLARE(int) gdImageColorClosestHWB (gdImagePtr im, int r, int g, int b)
{
	int idx;
	int best = -1;
	int have_best = 0;
	float best_dist = 0;

	if (im->trueColor) {
		return gdTrueColor (r, g, b);
	}
	for (idx = 0; idx < im->colorsTotal; idx++) {
		float dist;
		if (im->open[idx]) {
			continue; /* free slot, not a color */
		}
		dist = HWB_Diff (im->red[idx], im->green[idx], im->blue[idx], r, g, b);
		if (!have_best || dist < best_dist) {
			best_dist = dist;
			best = idx;
			have_best = 1;
		}
	}
	return best;
}
/**
* Function: gdImageColorExact
*
* Gets the exact color of the image
*
* This is a simplified variant of <gdImageColorExactAlpha> where the alpha
* channel is always opaque.
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
*
* Returns:
* The exact color already available in the palette for palette images; if
* there is no exact color, -1 is returned.
* For truecolor images the color value of the given components is returned.
*
* See also:
* - <gdImageColorClosest>
*/
/* Fully-opaque convenience wrapper around the alpha-aware exact match. */
BGD_DECLARE(int) gdImageColorExact (gdImagePtr im, int r, int g, int b)
{
return gdImageColorExactAlpha (im, r, g, b, gdAlphaOpaque);
}
/**
* Function: gdImageColorExactAlpha
*
* Gets the exact color of the image
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
* a - The value of the alpha component.
*
* Returns:
* The exact color already available in the palette for palette images; if
* there is no exact color, -1 is returned.
* For truecolor images the color value of the given components is returned.
*
* See also:
* - <gdImageColorClosestAlpha>
* - <gdTrueColorAlpha>
*/
/*
 * Return the palette index whose RGBA components match exactly, or -1
 * when no in-use entry matches.  For truecolor images the packed color
 * value is returned directly.
 */
BGD_DECLARE(int) gdImageColorExactAlpha (gdImagePtr im, int r, int g, int b, int a)
{
	int idx;

	if (im->trueColor) {
		return gdTrueColorAlpha (r, g, b, a);
	}
	for (idx = 0; idx < im->colorsTotal; idx++) {
		if (im->open[idx]) {
			continue; /* free slot */
		}
		if (im->red[idx] == r && im->green[idx] == g
		    && im->blue[idx] == b && im->alpha[idx] == a) {
			return idx;
		}
	}
	return -1;
}
/**
* Function: gdImageColorAllocate
*
* Allocates a color
*
* This is a simplified variant of <gdImageColorAllocateAlpha> where the alpha
* channel is always opaque.
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
*
* Returns:
* The color value.
*
* See also:
* - <gdImageColorDeallocate>
*/
/* Fully-opaque convenience wrapper around the alpha-aware allocator. */
BGD_DECLARE(int) gdImageColorAllocate (gdImagePtr im, int r, int g, int b)
{
return gdImageColorAllocateAlpha (im, r, g, b, gdAlphaOpaque);
}
/**
* Function: gdImageColorAllocateAlpha
*
* Allocates a color
*
* This is typically used for palette images, but can be used for truecolor
* images as well.
*
* Parameters:
* im - The image.
* r - The value of the red component.
* g - The value of the green component.
* b - The value of the blue component.
*
* Returns:
* The color value.
*
* See also:
* - <gdImageColorDeallocate>
*/
/*
 * Allocate a palette entry for (r, g, b, a), reusing the first
 * previously-deallocated slot if one exists, otherwise growing the
 * palette.  Returns the slot index, or -1 when the palette is full.
 * For truecolor images the packed color value is returned directly.
 */
BGD_DECLARE(int) gdImageColorAllocateAlpha (gdImagePtr im, int r, int g, int b, int a)
{
	int idx;
	int slot = -1;

	if (im->trueColor) {
		return gdTrueColorAlpha (r, g, b, a);
	}
	/* Prefer recycling a freed slot. */
	for (idx = 0; idx < im->colorsTotal; idx++) {
		if (im->open[idx]) {
			slot = idx;
			break;
		}
	}
	if (slot < 0) {
		/* No free slot: extend the palette unless it is full. */
		if (im->colorsTotal == gdMaxColors) {
			return -1;
		}
		slot = im->colorsTotal++;
	}
	im->red[slot] = r;
	im->green[slot] = g;
	im->blue[slot] = b;
	im->alpha[slot] = a;
	im->open[slot] = 0;
	return slot;
}
/*
Function: gdImageColorResolve
gdImageColorResolve is an alternative for the code fragment
(start code)
if ((color=gdImageColorExact(im,R,G,B)) < 0)
if ((color=gdImageColorAllocate(im,R,G,B)) < 0)
color=gdImageColorClosest(im,R,G,B);
(end code)
in a single function. Its advantage is that it is guaranteed to
return a color index in one search over the color table.
*/
/* Fully-opaque convenience wrapper around the alpha-aware resolver. */
BGD_DECLARE(int) gdImageColorResolve (gdImagePtr im, int r, int g, int b)
{
return gdImageColorResolveAlpha (im, r, g, b, gdAlphaOpaque);
}
/*
	Function: gdImageColorResolveAlpha

	Exact-match / allocate / closest-match in a single palette pass:
	returns an existing entry exactly matching (r, g, b, a), otherwise
	allocates one (reusing a freed slot when possible), otherwise —
	palette full — returns the closest in-use entry.  The designated
	transparent color is never resolved to.  For truecolor images the
	packed value is returned directly.
*/
BGD_DECLARE(int) gdImageColorResolveAlpha (gdImagePtr im, int r, int g, int b, int a)
{
	int i;
	int closest = -1;
	int slot = -1;
	long best = 4 * 255 * 255; /* larger than any achievable distance */

	if (im->trueColor) {
		return gdTrueColorAlpha (r, g, b, a);
	}
	for (i = 0; i < im->colorsTotal; i++) {
		long dr, dg, db, da, dist;
		if (im->open[i]) {
			slot = i; /* remember a reusable slot */
			continue;
		}
		if (i == im->transparent) {
			/* Never resolve to the color designated as transparent. */
			continue;
		}
		dr = (long) (im->red[i] - r);
		dg = (long) (im->green[i] - g);
		db = (long) (im->blue[i] - b);
		da = (long) (im->alpha[i] - a);
		dist = dr * dr + dg * dg + db * db + da * da;
		if (dist < best) {
			if (dist == 0) {
				return i; /* exact match */
			}
			best = dist;
			closest = i;
		}
	}
	/* No exact match: try to allocate the exact color. */
	if (slot == -1) {
		if (im->colorsTotal == gdMaxColors) {
			/* Palette full: settle for the closest in-use color. */
			return closest;
		}
		slot = im->colorsTotal++;
	}
	im->red[slot] = r;
	im->green[slot] = g;
	im->blue[slot] = b;
	im->alpha[slot] = a;
	im->open[slot] = 0;
	return slot; /* newly allocated color */
}
/**
* Function: gdImageColorDeallocate
*
* Removes a palette entry
*
* This is a no-op for truecolor images.
*
* Parameters:
* im - The image.
* color - The palette index.
*
* See also:
* - <gdImageColorAllocate>
* - <gdImageColorAllocateAlpha>
*/
/*
 * Release palette entry "color" so gdImageColorAllocate* can reuse it.
 * No-op for truecolor images and for out-of-range indices.
 */
BGD_DECLARE(void) gdImageColorDeallocate (gdImagePtr im, int color)
{
	if (!im->trueColor && color >= 0 && color < gdMaxColors) {
		im->open[color] = 1; /* mark the slot free */
	}
}
/**
* Function: gdImageColorTransparent
*
* Sets the transparent color of the image
*
* Parameter:
* im - The image.
* color - The color.
*
* See also:
* - <gdImageGetTransparent>
*/
/* Designate "color" as the image's transparent color.  Negative values
 * are ignored; for palette images the old transparent entry (if any) is
 * made opaque again and the new one fully transparent. */
BGD_DECLARE(void) gdImageColorTransparent (gdImagePtr im, int color)
{
if (color < 0) {
return;
}
if (!im->trueColor) {
if (color >= gdMaxColors) {
return;
}
/* Restore the previous transparent palette entry to opaque... */
if (im->transparent != -1) {
im->alpha[im->transparent] = gdAlphaOpaque;
}
/* ...and mark the new entry fully transparent. */
im->alpha[color] = gdAlphaTransparent;
}
im->transparent = color;
}
/*
Function: gdImagePaletteCopy
Copy the palette of "from" onto "to", remapping every pixel of "to" to
the closest color in the source palette.  No-op when either image is
truecolor.
*/
BGD_DECLARE(void) gdImagePaletteCopy (gdImagePtr to, gdImagePtr from)
{
int i;
int x, y, p;
int xlate[256];
if (to->trueColor) {
return;
}
if (from->trueColor) {
return;
}
/* xlate[] lazily caches old-index -> closest-new-index mappings. */
for (i = 0; i < 256; i++) {
xlate[i] = -1;
};
for (y = 0; y < (to->sy); y++) {
for (x = 0; x < (to->sx); x++) {
/* Optimization: no gdImageGetPixel */
p = to->pixels[y][x];
if (xlate[p] == -1) {
/* This ought to use HWB, but we don't have an alpha-aware
version of that yet. */
xlate[p] =
gdImageColorClosestAlpha (from, to->red[p], to->green[p],
to->blue[p], to->alpha[p]);
/*printf("Mapping %d (%d, %d, %d, %d) to %d (%d, %d, %d, %d)\n", */
/* p, to->red[p], to->green[p], to->blue[p], to->alpha[p], */
/* xlate[p], from->red[xlate[p]], from->green[xlate[p]], from->blue[xlate[p]], from->alpha[xlate[p]]); */
};
/* Optimization: no gdImageSetPixel */
to->pixels[y][x] = xlate[p];
};
};
/* Copy the source palette entries and mark them in use. */
for (i = 0; (i < (from->colorsTotal)); i++) {
/*printf("Copying color %d (%d, %d, %d, %d)\n", i, from->red[i], from->blue[i], from->green[i], from->alpha[i]); */
to->red[i] = from->red[i];
to->blue[i] = from->blue[i];
to->green[i] = from->green[i];
to->alpha[i] = from->alpha[i];
to->open[i] = 0;
};
/* Entries beyond the source palette size become free slots. */
for (i = from->colorsTotal; (i < to->colorsTotal); i++) {
to->open[i] = 1;
};
to->colorsTotal = from->colorsTotal;
}
/*
Function: gdImageColorReplace
Replace every pixel of color "src" with "dst" inside the clipping
rectangle.  Returns the number of pixels changed.
*/
BGD_DECLARE(int) gdImageColorReplace (gdImagePtr im, int src, int dst)
{
register int x, y;
int n = 0;
if (src == dst) {
return 0;
}
/* One loop body, instantiated for the truecolor and palette raw pixel
accessors; scans only the clip rectangle. */
#define REPLACING_LOOP(pixel) do { \
for (y = im->cy1; y <= im->cy2; y++) { \
for (x = im->cx1; x <= im->cx2; x++) { \
if (pixel(im, x, y) == src) { \
gdImageSetPixel(im, x, y, dst); \
n++; \
} \
} \
} \
} while (0)
if (im->trueColor) {
REPLACING_LOOP(gdImageTrueColorPixel);
} else {
REPLACING_LOOP(gdImagePalettePixel);
}
#undef REPLACING_LOOP
return n;
}
/*
Function: gdImageColorReplaceThreshold
Like gdImageColorReplace, but replaces every pixel whose color matches
"src" within "threshold" (as decided by gdColorMatch).  Returns the
number of pixels changed.
*/
BGD_DECLARE(int) gdImageColorReplaceThreshold (gdImagePtr im, int src, int dst, float threshold)
{
register int x, y;
int n = 0;
if (src == dst) {
return 0;
}
/* Same dual-instantiation pattern as gdImageColorReplace. */
#define REPLACING_LOOP(pixel) do { \
for (y = im->cy1; y <= im->cy2; y++) { \
for (x = im->cx1; x <= im->cx2; x++) { \
if (gdColorMatch(im, src, pixel(im, x, y), threshold)) { \
gdImageSetPixel(im, x, y, dst); \
n++; \
} \
} \
} \
} while (0)
if (im->trueColor) {
REPLACING_LOOP(gdImageTrueColorPixel);
} else {
REPLACING_LOOP(gdImagePalettePixel);
}
#undef REPLACING_LOOP
return n;
}
/* qsort/bsearch comparator for the (src, dst) int pairs built by
 * gdImageColorReplaceArray: orders by the first int (the src key). */
static int colorCmp (const void *x, const void *y)
{
	int a = *(int const *)x;
	int b = *(int const *)y;

	if (a < b) {
		return -1;
	}
	if (a > b) {
		return 1;
	}
	return 0;
}
/*
Function: gdImageColorReplaceArray
Replace each color src[i] with dst[i] in a single pass over the clip
rectangle.  The pairs are copied into a table of (src, dst) records
sorted by src, so each pixel lookup is a bsearch.  Returns the number
of pixels changed, or -1 on overflow / allocation failure.
*/
BGD_DECLARE(int) gdImageColorReplaceArray (gdImagePtr im, int len, int *src, int *dst)
{
register int x, y;
int c, *d, *base;
int i, n = 0;
if (len <= 0 || src == dst) {
return 0;
}
if (len == 1) {
/* Single pair: the simple scan is cheaper than the table. */
return gdImageColorReplace(im, src[0], dst[0]);
}
if (overflow2(len, sizeof(int)<<1)) {
return -1;
}
/* Interleaved (src, dst) pairs, sorted by src for bsearch. */
base = (int *)gdMalloc(len * (sizeof(int)<<1));
if (!base) {
return -1;
}
for (i = 0; i < len; i++) {
base[(i<<1)] = src[i];
base[(i<<1)+1] = dst[i];
}
qsort(base, len, sizeof(int)<<1, colorCmp);
#define REPLACING_LOOP(pixel) do { \
for (y = im->cy1; y <= im->cy2; y++) { \
for (x = im->cx1; x <= im->cx2; x++) { \
c = pixel(im, x, y); \
if ( (d = (int *)bsearch(&c, base, len, sizeof(int)<<1, colorCmp)) ) { \
gdImageSetPixel(im, x, y, d[1]); \
n++; \
} \
} \
} \
} while (0)
if (im->trueColor) {
REPLACING_LOOP(gdImageTrueColorPixel);
} else {
REPLACING_LOOP(gdImagePalettePixel);
}
#undef REPLACING_LOOP
gdFree(base);
return n;
}
/*
Function: gdImageColorReplaceCallback
Remap colors through "callback".  For truecolor images every pixel in
the clip rectangle is passed through the callback; for palette images
only the in-use palette entries are collected and remapped via
gdImageColorReplaceArray.  Returns the number of pixels changed, or -1
on allocation failure.
*/
BGD_DECLARE(int) gdImageColorReplaceCallback (gdImagePtr im, gdCallbackImageColor callback)
{
int c, d, n = 0;
if (!callback) {
return 0;
}
if (im->trueColor) {
register int x, y;
for (y = im->cy1; y <= im->cy2; y++) {
for (x = im->cx1; x <= im->cx2; x++) {
c = gdImageTrueColorPixel(im, x, y);
if ( (d = callback(im, c)) != c) {
gdImageSetPixel(im, x, y, d);
n++;
}
}
}
} else { /* palette */
int *sarr, *darr;
int k, len = 0;
/* Collect the in-use palette indices... */
sarr = (int *)gdCalloc(im->colorsTotal, sizeof(int));
if (!sarr) {
return -1;
}
for (c = 0; c < im->colorsTotal; c++) {
if (!im->open[c]) {
sarr[len++] = c;
}
}
/* ...compute their replacements... */
darr = (int *)gdCalloc(len, sizeof(int));
if (!darr) {
gdFree(sarr);
return -1;
}
for (k = 0; k < len; k++) {
darr[k] = callback(im, sarr[k]);
}
/* ...and apply them in one array pass. */
n = gdImageColorReplaceArray(im, k, sarr, darr);
gdFree(darr);
gdFree(sarr);
}
return n;
}
/* 2.0.10: before the drawing routines, some code to clip points that are
* outside the drawing window. Nick Atty (nick@canalplan.org.uk)
*
* This is the Sutherland Hodgman Algorithm, as implemented by
* Duvanenko, Robbins and Gyurcsik - SH(DRG) for short. See Dr Dobb's
* Journal, January 1996, pp107-110 and 116-117
*
* Given the end points of a line, and a bounding rectangle (which we
* know to be from (0,0) to (SX,SY)), adjust the endpoints to be on
* the edges of the rectangle if the line should be drawn at all,
* otherwise return a failure code */
/* this does "one-dimensional" clipping: note that the second time it
is called, all the x parameters refer to height and the y to width
- the comments ignore this (if you can understand it when it's
looking at the X parameters, it should become clear what happens on
the second call!) The code is simplified from that in the article,
as we know that gd images always start at (0,0) */
/* 2.0.26, TBB: we now have to respect a clipping rectangle, it won't
necessarily start at 0. */
/* One-dimensional Sutherland-Hodgman clip of the segment
 * (*x0,*y0)-(*x1,*y1) against [mindim, maxdim] along the x axis.
 * Returns 0 when the segment lies entirely outside; otherwise clamps
 * the endpoints in place and returns 1.  The caller invokes it a
 * second time with the axes swapped to clip in y. */
static int
clip_1d (int *x0, int *y0, int *x1, int *y1, int mindim, int maxdim)
{
	double slope; /* gradient of the line */

	if (*x0 < mindim) {
		/* Start of the line is left of the window. */
		if (*x1 < mindim) {
			return 0; /* so is the end: no intersection */
		}
		slope = (*y1 - *y0) / (double) (*x1 - *x0);
		/* Move the start onto the left boundary, adjusting y to match. */
		*y0 -= (int)(slope * (*x0 - mindim));
		*x0 = mindim;
		/* The far end may still stick out to the right. */
		if (*x1 > maxdim) {
			*y1 += slope * (maxdim - *x1);
			*x1 = maxdim;
		}
		return 1;
	}
	if (*x0 > maxdim) {
		/* Start of the line is right of the window (mirror case). */
		if (*x1 > maxdim) {
			return 0;
		}
		slope = (*y1 - *y0) / (double) (*x1 - *x0);
		*y0 += (int)(slope * (maxdim - *x0));
		*x0 = maxdim;
		if (*x1 < mindim) {
			*y1 -= (int)(slope * (*x1 - mindim));
			*x1 = mindim;
		}
		return 1;
	}
	/* Start is inside the window; only the far end may need clipping. */
	if (*x1 > maxdim) {
		slope = (*y1 - *y0) / (double) (*x1 - *x0);
		*y1 += (int)(slope * (maxdim - *x1));
		*x1 = maxdim;
		return 1;
	}
	if (*x1 < mindim) {
		slope = (*y1 - *y0) / (double) (*x1 - *x0);
		*y1 -= (int)(slope * (*x1 - mindim));
		*x1 = mindim;
		return 1;
	}
	/* Both endpoints already inside the window. */
	return 1;
}
/* end of line clipping code */
/**
* Group: Pixels
*/
/*
Function: gdImageSetPixel
Set pixel (x, y) to "color".  The pseudo-colors gdStyled,
gdStyledBrushed, gdBrushed, gdTiled and gdAntiAliased dispatch to their
respective drawing mechanisms; ordinary colors are written directly,
honouring the clip rectangle and, for truecolor images, the current
alpha-blending mode.
*/
BGD_DECLARE(void) gdImageSetPixel (gdImagePtr im, int x, int y, int color)
{
int p;
switch (color) {
case gdStyled:
/* Draw the next entry of the style pattern, skipping
transparent entries; stylePos wraps around styleLength. */
if (!im->style) {
/* Refuse to draw if no style is set. */
return;
} else {
p = im->style[im->stylePos++];
}
if (p != (gdTransparent)) {
gdImageSetPixel (im, x, y, p);
}
im->stylePos = im->stylePos % im->styleLength;
break;
case gdStyledBrushed:
/* Like gdStyled, but a non-zero, non-transparent style entry
stamps the brush instead of a single pixel. */
if (!im->style) {
/* Refuse to draw if no style is set. */
return;
}
p = im->style[im->stylePos++];
if ((p != gdTransparent) && (p != 0)) {
gdImageSetPixel (im, x, y, gdBrushed);
}
im->stylePos = im->stylePos % im->styleLength;
break;
case gdBrushed:
gdImageBrushApply (im, x, y);
break;
case gdTiled:
gdImageTileApply (im, x, y);
break;
case gdAntiAliased:
/* This shouldn't happen (2.0.26) because we just call
gdImageAALine now, but do something sane. */
gdImageSetPixel(im, x, y, im->AA_color);
break;
default:
/* Ordinary color: write it if inside the clip rectangle. */
if (gdImageBoundsSafeMacro (im, x, y)) {
if (im->trueColor) {
switch (im->alphaBlendingFlag) {
default:
case gdEffectReplace:
im->tpixels[y][x] = color;
break;
case gdEffectAlphaBlend:
case gdEffectNormal:
im->tpixels[y][x] = gdAlphaBlend(im->tpixels[y][x], color);
break;
case gdEffectOverlay :
im->tpixels[y][x] = gdLayerOverlay(im->tpixels[y][x], color);
break;
case gdEffectMultiply :
im->tpixels[y][x] = gdLayerMultiply(im->tpixels[y][x], color);
break;
}
} else {
im->pixels[y][x] = color;
}
}
break;
}
}
/* Stamp the current brush image centered at (x, y).  Each of the four
 * truecolor/palette combinations of destination and brush needs a
 * slightly different pixel path; pixels equal to the brush's
 * transparent color are always skipped. */
static void
gdImageBrushApply (gdImagePtr im, int x, int y)
{
int lx, ly;
int hy;
int hx;
int x1, y1, x2, y2;
int srcx, srcy;
if (!im->brush) {
return;
}
/* Destination rectangle covered by the brush, centered on (x, y). */
hy = gdImageSY (im->brush) / 2;
y1 = y - hy;
y2 = y1 + gdImageSY (im->brush);
hx = gdImageSX (im->brush) / 2;
x1 = x - hx;
x2 = x1 + gdImageSX (im->brush);
srcy = 0;
if (im->trueColor) {
if (im->brush->trueColor) {
/* Truecolor destination, truecolor brush: copy directly. */
for (ly = y1; (ly < y2); ly++) {
srcx = 0;
for (lx = x1; (lx < x2); lx++) {
int p;
p = gdImageGetTrueColorPixel (im->brush, srcx, srcy);
/* 2.0.9, Thomas Winzig: apply simple full transparency */
if (p != gdImageGetTransparent (im->brush)) {
gdImageSetPixel (im, lx, ly, p);
}
srcx++;
}
srcy++;
}
} else {
/* 2.0.12: Brush palette, image truecolor (thanks to Thorben Kundinger
for pointing out the issue) */
for (ly = y1; (ly < y2); ly++) {
srcx = 0;
for (lx = x1; (lx < x2); lx++) {
int p, tc;
p = gdImageGetPixel (im->brush, srcx, srcy);
tc = gdImageGetTrueColorPixel (im->brush, srcx, srcy);
/* 2.0.9, Thomas Winzig: apply simple full transparency */
if (p != gdImageGetTransparent (im->brush)) {
gdImageSetPixel (im, lx, ly, tc);
}
srcx++;
}
srcy++;
}
}
} else {
/* Palette destination. */
for (ly = y1; (ly < y2); ly++) {
srcx = 0;
for (lx = x1; (lx < x2); lx++) {
int p;
p = gdImageGetPixel (im->brush, srcx, srcy);
/* Allow for non-square brushes! */
if (p != gdImageGetTransparent (im->brush)) {
/* Truecolor brush. Very slow
on a palette destination. */
if (im->brush->trueColor) {
gdImageSetPixel (im, lx, ly,
gdImageColorResolveAlpha (im,
gdTrueColorGetRed
(p),
gdTrueColorGetGreen
(p),
gdTrueColorGetBlue
(p),
gdTrueColorGetAlpha
(p)));
} else {
/* Palette brush: indices were pre-mapped into
brushColorMap when the brush was set. */
gdImageSetPixel (im, lx, ly, im->brushColorMap[p]);
}
}
srcx++;
}
srcy++;
}
}
}
/* Paint pixel (x, y) with the corresponding pixel of the current tile
 * image (tiling repeats via modulo), skipping the tile's transparent
 * color.  Handles all truecolor/palette combinations of image and
 * tile. */
static void
gdImageTileApply (gdImagePtr im, int x, int y)
{
gdImagePtr tile = im->tile;
int srcx, srcy;
int p;
if (!tile) {
return;
}
/* Wrap the destination coordinate into the tile. */
srcx = x % gdImageSX (tile);
srcy = y % gdImageSY (tile);
if (im->trueColor) {
p = gdImageGetPixel (tile, srcx, srcy);
if (p != gdImageGetTransparent (tile)) {
if (!tile->trueColor) {
/* Expand the palette entry to a truecolor value. */
p = gdTrueColorAlpha(tile->red[p], tile->green[p], tile->blue[p], tile->alpha[p]);
}
gdImageSetPixel (im, x, y, p);
}
} else {
p = gdImageGetPixel (tile, srcx, srcy);
/* Allow for transparency */
if (p != gdImageGetTransparent (tile)) {
if (tile->trueColor) {
/* Truecolor tile. Very slow
on a palette destination. */
gdImageSetPixel (im, x, y,
gdImageColorResolveAlpha (im,
gdTrueColorGetRed
(p),
gdTrueColorGetGreen
(p),
gdTrueColorGetBlue
(p),
gdTrueColorGetAlpha
(p)));
} else {
/* Palette tile: indices were pre-mapped into tileColorMap. */
gdImageSetPixel (im, x, y, im->tileColorMap[p]);
}
}
}
}
/**
* Function: gdImageGetPixel
*
* Gets a pixel color as stored in the image.
*
* Parameters:
* im - The image.
* x - The x-coordinate.
* y - The y-coordinate.
*
* See also:
* - <gdImageGetTrueColorPixel>
* - <gdImagePalettePixel>
* - <gdImageTrueColorPixel>
*/
/*
 * Return the raw stored value of pixel (x, y): the packed truecolor
 * value for truecolor images, the palette index otherwise.  Reads
 * outside the clip rectangle yield 0.
 */
BGD_DECLARE(int) gdImageGetPixel (gdImagePtr im, int x, int y)
{
	if (!gdImageBoundsSafeMacro (im, x, y)) {
		return 0;
	}
	return im->trueColor ? im->tpixels[y][x] : im->pixels[y][x];
}
/**
* Function: gdImageGetTrueColorPixel
*
* Gets a pixel color always as truecolor value.
*
* Parameters:
* im - The image.
* x - The x-coordinate.
* y - The y-coordinate.
*
* See also:
* - <gdImageGetPixel>
* - <gdImageTrueColorPixel>
*/
/*
 * Return pixel (x, y) as a packed truecolor value regardless of image
 * type.  Palette entries are expanded from the palette; the entry
 * designated transparent is reported with gdAlphaTransparent.
 */
BGD_DECLARE(int) gdImageGetTrueColorPixel (gdImagePtr im, int x, int y)
{
	int p = gdImageGetPixel (im, x, y);

	if (im->trueColor) {
		return p;
	}
	return gdTrueColorAlpha (im->red[p], im->green[p], im->blue[p],
	                         (im->transparent == p) ? gdAlphaTransparent
	                                                : im->alpha[p]);
}
/**
* Group: Primitives
*/
/*
Function: gdImageAABlend
NO-OP, kept for library compatibility.
*/
/* Deliberate no-op: anti-alias blending is handled elsewhere now; this
   stub only preserves binary/source compatibility for old callers. */
BGD_DECLARE(void) gdImageAABlend (gdImagePtr im)
{
	(void)im;	/* silence unused-parameter warnings */
}
static void gdImageAALine (gdImagePtr im, int x1, int y1, int x2, int y2, int col);
static void _gdImageFilledHRectangle (gdImagePtr im, int x1, int y1, int x2, int y2,
int color);
/* Draw a horizontal line on scan line y from x1 to x2 (either order),
   honoring the image's current line thickness. */
static void gdImageHLine(gdImagePtr im, int y, int x1, int x2, int col)
{
	if (im->thick > 1) {
		/* Thick line: render as a filled rectangle centered on y. */
		int half = im->thick >> 1;
		_gdImageFilledHRectangle(im, x1, y - half, x2, y + im->thick - half - 1, col);
		return;
	}
	if (x2 < x1) {
		int tmp = x1;
		x1 = x2;
		x2 = tmp;
	}
	while (x1 <= x2) {
		gdImageSetPixel(im, x1++, y, col);
	}
}
/* Draw a vertical line at column x from y1 to y2 (either order),
   honoring the image's current line thickness. */
static void gdImageVLine(gdImagePtr im, int x, int y1, int y2, int col)
{
	if (im->thick > 1) {
		/* Thick line: render as a filled rectangle centered on x. */
		int half = im->thick >> 1;
		gdImageFilledRectangle(im, x - half, y1, x + im->thick - half - 1, y2, col);
		return;
	}
	if (y2 < y1) {
		int tmp = y1;
		y1 = y2;
		y2 = tmp;
	}
	while (y1 <= y2) {
		gdImageSetPixel(im, x, y1++, col);
	}
}
/*
Function: gdImageLine
Bresenham as presented in Foley & Van Dam.
*/
/* Draw a line from (x1,y1) to (x2,y2) using Bresenham's algorithm
   (as presented in Foley & Van Dam), clipped to the image's clipping
   rectangle and widened according to im->thick. */
BGD_DECLARE(void) gdImageLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
	int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag;
	int wid;	/* stroke width perpendicular to the dominant axis */
	int w, wstart;
	int thick;
	if (color == gdAntiAliased) {
		/*
		   gdAntiAliased passed as color: use the much faster, much cheaper
		   and equally attractive gdImageAALine implementation. That
		   clips too, so don't clip twice.
		 */
		gdImageAALine(im, x1, y1, x2, y2, im->AA_color);
		return;
	}
	/* 2.0.10: Nick Atty: clip to edges of drawing rectangle, return if no
	   points need to be drawn. 2.0.26, TBB: clip to edges of clipping
	   rectangle. We were getting away with this because gdImageSetPixel
	   is used for actual drawing, but this is still more efficient and opens
	   the way to skip per-pixel bounds checking in the future. */
	if (clip_1d (&x1, &y1, &x2, &y2, im->cx1, im->cx2) == 0)
		return;
	if (clip_1d (&y1, &x1, &y2, &x2, im->cy1, im->cy2) == 0)
		return;
	thick = im->thick;
	dx = abs (x2 - x1);
	dy = abs (y2 - y1);
	/* Axis-aligned lines go through the dedicated fast paths. */
	if (dx == 0) {
		gdImageVLine(im, x1, y1, y2, color);
		return;
	} else if (dy == 0) {
		gdImageHLine(im, y1, x1, x2, color);
		return;
	}
	if (dy <= dx) {
		/* More-or-less horizontal. use wid for vertical stroke */
		/* Doug Claar: watch out for NaN in atan2 (2.0.5) */
		/* 2.0.12: Michael Schwartz: divide rather than multiply;
		   TBB: but watch out for /0! */
		double ac = cos (atan2 (dy, dx));
		if (ac != 0) {
			wid = thick / ac;
		} else {
			wid = 1;
		}
		if (wid == 0) {
			wid = 1;
		}
		d = 2 * dy - dx;
		incr1 = 2 * dy;
		incr2 = 2 * (dy - dx);
		if (x1 > x2) {
			/* Always walk left-to-right; remember y's direction. */
			x = x2;
			y = y2;
			ydirflag = (-1);
			xend = x1;
		} else {
			x = x1;
			y = y1;
			ydirflag = 1;
			xend = x2;
		}
		/* Set up line thickness */
		wstart = y - wid / 2;
		for (w = wstart; w < wstart + wid; w++)
			gdImageSetPixel (im, x, w, color);
		if (((y2 - y1) * ydirflag) > 0) {
			/* y increases as x advances */
			while (x < xend) {
				x++;
				if (d < 0) {
					d += incr1;
				} else {
					y++;
					d += incr2;
				}
				/* Draw a vertical run of wid pixels at this column. */
				wstart = y - wid / 2;
				for (w = wstart; w < wstart + wid; w++)
					gdImageSetPixel (im, x, w, color);
			}
		} else {
			/* y decreases as x advances */
			while (x < xend) {
				x++;
				if (d < 0) {
					d += incr1;
				} else {
					y--;
					d += incr2;
				}
				wstart = y - wid / 2;
				for (w = wstart; w < wstart + wid; w++)
					gdImageSetPixel (im, x, w, color);
			}
		}
	} else {
		/* More-or-less vertical. use wid for horizontal stroke */
		/* 2.0.12: Michael Schwartz: divide rather than multiply;
		   TBB: but watch out for /0! */
		double as = sin (atan2 (dy, dx));
		if (as != 0) {
			wid = thick / as;
		} else {
			wid = 1;
		}
		if (wid == 0)
			wid = 1;
		d = 2 * dx - dy;
		incr1 = 2 * dx;
		incr2 = 2 * (dx - dy);
		if (y1 > y2) {
			/* Always walk top-to-bottom; remember x's direction. */
			y = y2;
			x = x2;
			yend = y1;
			xdirflag = (-1);
		} else {
			y = y1;
			x = x1;
			yend = y2;
			xdirflag = 1;
		}
		/* Set up line thickness */
		wstart = x - wid / 2;
		for (w = wstart; w < wstart + wid; w++)
			gdImageSetPixel (im, w, y, color);
		if (((x2 - x1) * xdirflag) > 0) {
			/* x increases as y advances */
			while (y < yend) {
				y++;
				if (d < 0) {
					d += incr1;
				} else {
					x++;
					d += incr2;
				}
				/* Draw a horizontal run of wid pixels on this row. */
				wstart = x - wid / 2;
				for (w = wstart; w < wstart + wid; w++)
					gdImageSetPixel (im, w, y, color);
			}
		} else {
			/* x decreases as y advances */
			while (y < yend) {
				y++;
				if (d < 0) {
					d += incr1;
				} else {
					x--;
					d += incr2;
				}
				wstart = x - wid / 2;
				for (w = wstart; w < wstart + wid; w++)
					gdImageSetPixel (im, w, y, color);
			}
		}
	}
}
static void dashedSet (gdImagePtr im, int x, int y, int color,
int *onP, int *dashStepP, int wid, int vert);
/*
Function: gdImageDashedLine
*/
/* Draw a dashed line from (x1,y1) to (x2,y2).  Structurally the same
   Bresenham walk as gdImageLine, but every pixel position is routed
   through dashedSet, which alternates gdDashSize-long drawn and skipped
   runs. */
BGD_DECLARE(void) gdImageDashedLine (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
	int dx, dy, incr1, incr2, d, x, y, xend, yend, xdirflag, ydirflag;
	int dashStep = 0;	/* position within the current dash cycle */
	int on = 1;		/* start with the pen down */
	int wid;
	int vert;		/* 1 = stroke drawn vertically, 0 = horizontally */
	int thick = im->thick;
	dx = abs (x2 - x1);
	dy = abs (y2 - y1);
	if (dy <= dx) {
		/* More-or-less horizontal. use wid for vertical stroke */
		/* 2.0.12: Michael Schwartz: divide rather than multiply;
		   TBB: but watch out for /0! */
		/* NOTE(review): gdImageLine uses cos(atan2(dy,dx)) for this
		   near-horizontal branch; sin here yields a much larger wid
		   for shallow lines.  Long-standing behavior - confirm
		   against upstream before changing. */
		double as = sin (atan2 (dy, dx));
		if (as != 0) {
			wid = thick / as;
		} else {
			wid = 1;
		}
		vert = 1;
		d = 2 * dy - dx;
		incr1 = 2 * dy;
		incr2 = 2 * (dy - dx);
		if (x1 > x2) {
			/* Walk left-to-right; remember y's direction. */
			x = x2;
			y = y2;
			ydirflag = (-1);
			xend = x1;
		} else {
			x = x1;
			y = y1;
			ydirflag = 1;
			xend = x2;
		}
		dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
		if (((y2 - y1) * ydirflag) > 0) {
			while (x < xend) {
				x++;
				if (d < 0) {
					d += incr1;
				} else {
					y++;
					d += incr2;
				}
				dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
			}
		} else {
			while (x < xend) {
				x++;
				if (d < 0) {
					d += incr1;
				} else {
					y--;
					d += incr2;
				}
				dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
			}
		}
	} else {
		/* More-or-less vertical: step along y, stroke horizontally. */
		/* 2.0.12: Michael Schwartz: divide rather than multiply;
		   TBB: but watch out for /0! */
		double as = sin (atan2 (dy, dx));
		if (as != 0) {
			wid = thick / as;
		} else {
			wid = 1;
		}
		vert = 0;
		d = 2 * dx - dy;
		incr1 = 2 * dx;
		incr2 = 2 * (dx - dy);
		if (y1 > y2) {
			/* Walk top-to-bottom; remember x's direction. */
			y = y2;
			x = x2;
			yend = y1;
			xdirflag = (-1);
		} else {
			y = y1;
			x = x1;
			yend = y2;
			xdirflag = 1;
		}
		dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
		if (((x2 - x1) * xdirflag) > 0) {
			while (y < yend) {
				y++;
				if (d < 0) {
					d += incr1;
				} else {
					x++;
					d += incr2;
				}
				dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
			}
		} else {
			while (y < yend) {
				y++;
				if (d < 0) {
					d += incr1;
				} else {
					x--;
					d += incr2;
				}
				dashedSet (im, x, y, color, &on, &dashStep, wid, vert);
			}
		}
	}
}
/* Plot one position of a dashed line.  Advances the shared dash-cycle
   state (*dashStepP, *onP) and, while the pen is "on", draws a stroke of
   wid pixels centered on the position - vertically when vert is set,
   horizontally otherwise. */
static void
dashedSet (gdImagePtr im, int x, int y, int color,
	   int *onP, int *dashStepP, int wid, int vert)
{
	int step = *dashStepP + 1;
	int on = *onP;
	int w, start;

	if (step == gdDashSize) {
		/* End of a dash run: restart the cycle and toggle the pen. */
		step = 0;
		on = !on;
	}
	if (on) {
		if (vert) {
			start = y - wid / 2;
			for (w = start; w < start + wid; w++) {
				gdImageSetPixel (im, x, w, color);
			}
		} else {
			start = x - wid / 2;
			for (w = start; w < start + wid; w++) {
				gdImageSetPixel (im, w, y, color);
			}
		}
	}
	*dashStepP = step;
	*onP = on;
}
/*
Function: gdImageBoundsSafe
*/
/* Function-call wrapper around gdImageBoundsSafeMacro: returns nonzero
   when (x, y) lies inside the image's clipping rectangle. */
BGD_DECLARE(int) gdImageBoundsSafe (gdImagePtr im, int x, int y)
{
	return gdImageBoundsSafeMacro (im, x, y);
}
/**
* Function: gdImageChar
*
* Draws a single character.
*
* Parameters:
* im - The image to draw onto.
* f - The raster font.
* x - The x coordinate of the upper left pixel.
* y - The y coordinate of the upper left pixel.
* c - The character.
* color - The color.
*
* Variants:
* - <gdImageCharUp>
*
* See also:
* - <gdFontPtr>
*/
BGD_DECLARE(void) gdImageChar (gdImagePtr im, gdFontPtr f, int x, int y, int c, int color)
{
	int row, col;
	int glyph;	/* offset of this character's bitmap in f->data */
#ifdef CHARSET_EBCDIC
	c = ASC (c);
#endif /*CHARSET_EBCDIC */
	/* Ignore characters the font does not cover. */
	if (c < f->offset || c >= f->offset + f->nchars) {
		return;
	}
	glyph = (c - f->offset) * f->h * f->w;
	/* Walk the glyph row by row; set only the "on" bitmap cells. */
	for (row = 0; row < f->h; row++) {
		for (col = 0; col < f->w; col++) {
			if (f->data[glyph + row * f->w + col]) {
				gdImageSetPixel (im, x + col, y + row, color);
			}
		}
	}
}
/**
* Function: gdImageCharUp
*/
BGD_DECLARE(void) gdImageCharUp (gdImagePtr im, gdFontPtr f, int x, int y, int c, int color)
{
	int i, j;	/* i: glyph column -> screen -y; j: glyph row -> screen +x */
	int glyph;	/* offset of this character's bitmap in f->data */
#ifdef CHARSET_EBCDIC
	c = ASC (c);
#endif /*CHARSET_EBCDIC */
	/* Ignore characters the font does not cover. */
	if (c < f->offset || c >= f->offset + f->nchars) {
		return;
	}
	glyph = (c - f->offset) * f->h * f->w;
	/* Character is rotated 90 degrees counter-clockwise: the glyph's
	   x-axis runs up the screen, its y-axis runs right. */
	for (i = 0; i < f->w; i++) {
		for (j = 0; j < f->h; j++) {
			if (f->data[glyph + j * f->w + i]) {
				gdImageSetPixel (im, x + j, y - i, color);
			}
		}
	}
}
/**
* Function: gdImageString
*
* Draws a character string.
*
* Parameters:
* im - The image to draw onto.
* f - The raster font.
* x - The x coordinate of the upper left pixel.
* y - The y coordinate of the upper left pixel.
* c - The character string.
* color - The color.
*
* Variants:
* - <gdImageStringUp>
* - <gdImageString16>
* - <gdImageStringUp16>
*
* See also:
* - <gdFontPtr>
* - <gdImageStringTTF>
*/
BGD_DECLARE(void) gdImageString (gdImagePtr im, gdFontPtr f,
				 int x, int y, unsigned char *s, int color)
{
	/* Render each character left to right, advancing by the font width. */
	int n = strlen ((char *) s);
	int i;

	for (i = 0; i < n; i++) {
		gdImageChar (im, f, x, y, s[i], color);
		x += f->w;
	}
}
/**
* Function: gdImageStringUp
*/
BGD_DECLARE(void) gdImageStringUp (gdImagePtr im, gdFontPtr f,
				   int x, int y, unsigned char *s, int color)
{
	/* Render each character bottom to top, advancing by the font width. */
	int n = strlen ((char *) s);
	int i;

	for (i = 0; i < n; i++) {
		gdImageCharUp (im, f, x, y, s[i], color);
		y -= f->w;
	}
}
static int strlen16 (unsigned short *s);
/**
* Function: gdImageString16
*/
BGD_DECLARE(void) gdImageString16 (gdImagePtr im, gdFontPtr f,
				   int x, int y, unsigned short *s, int color)
{
	/* 16-bit-character variant of gdImageString. */
	int n = strlen16 (s);
	int i;

	for (i = 0; i < n; i++) {
		gdImageChar (im, f, x, y, s[i], color);
		x += f->w;
	}
}
/**
* Function: gdImageStringUp16
*/
BGD_DECLARE(void) gdImageStringUp16 (gdImagePtr im, gdFontPtr f,
				     int x, int y, unsigned short *s, int color)
{
	/* 16-bit-character variant of gdImageStringUp. */
	int n = strlen16 (s);
	int i;

	for (i = 0; i < n; i++) {
		gdImageCharUp (im, f, x, y, s[i], color);
		y -= f->w;
	}
}
/* Length of a zero-terminated string of 16-bit code units, counted in
   units (not bytes), excluding the terminator. */
static int
strlen16 (unsigned short *s)
{
	unsigned short *p = s;

	while (*p) {
		p++;
	}
	return (int) (p - s);
}
#ifndef HAVE_LSQRT
/* If you don't have a nice square root function for longs, you can use
** this hack
*/
long
lsqrt (long n)
{
long result = (long) sqrt ((double) n);
return result;
}
#endif
/* s and e are integers modulo 360 (degrees), with 0 degrees
being the rightmost extreme and degrees changing clockwise.
cx and cy are the center in pixels; w and h are the horizontal
and vertical diameter in pixels. */
/*
Function: gdImageArc
*/
/* Draw an unfilled arc: thin wrapper that delegates to gdImageFilledArc
   with the gdNoFill style. */
BGD_DECLARE(void) gdImageArc (gdImagePtr im, int cx, int cy, int w, int h, int s, int e,
			      int color)
{
	gdImageFilledArc (im, cx, cy, w, h, s, e, color, gdNoFill);
}
/*
Function: gdImageFilledArc
*/
/* Draw an arc (pie slice or chord, filled or outlined per `style`)
   centered at (cx,cy) with diameters w and h, from angle s to e in
   degrees (0 = rightmost point, increasing clockwise).  Non-fill modes
   draw line segments; fill modes collect the perimeter into pts[] and
   hand it to gdImageFilledPolygon.  pts[363] is enough for at most
   361 perimeter points plus center and one inserted endpoint. */
BGD_DECLARE(void) gdImageFilledArc (gdImagePtr im, int cx, int cy, int w, int h, int s, int e,
				    int color, int style)
{
	gdPoint pts[363];
	int i, pti;	/* pti: next free slot in pts[] */
	int lx = 0, ly = 0;	/* last point emitted */
	int fx = 0, fy = 0;	/* first point of the arc */
	int startx = -1, starty = -1, endx = -1, endy = -1;

	/* Normalize angles so that 0 <= s <= e and e - s <= 360. */
	if ((s % 360)  == (e % 360)) {
		s = 0;
		e = 360;
	} else {
		if (s > 360) {
			s = s % 360;
		}
		if (e > 360) {
			e = e % 360;
		}
		while (s < 0) {
			s += 360;
		}
		while (e < s) {
			e += 360;
		}
		if (s == e) {
			s = 0;
			e = 360;
		}
	}

	/* Walk the perimeter one degree at a time using the sine/cosine
	   lookup tables (values scaled by 1024). */
	for (i = s, pti = 1; (i <= e); i++, pti++) {
		int x, y;
		x = endx = ((long) gdCosT[i % 360] * (long) w / (2 * 1024)) + cx;
		y = endy = ((long) gdSinT[i % 360] * (long) h / (2 * 1024)) + cy;
		if (i != s) {
			if (!(style & gdChord)) {
				if (style & gdNoFill) {
					gdImageLine (im, lx, ly, x, y, color);
				} else {
					if (y == ly) {
						/* Same scan line as the previous point:
						   collapse horizontal runs so the polygon
						   filler sees one vertex per run. */
						pti--; /* don't add this point */
						if (((i > 270 || i < 90) && x > lx) || ((i >  90 && i < 270) && x < lx)) {
							/* replace the old x coord, if increasing on the
							   right side or decreasing on the left side */
							pts[pti].x = x;
						}
					} else {
						pts[pti].x = x;
						pts[pti].y = y;
					}
				}
			}
		} else {
			/* First point: remember it; pie fills also seed the
			   polygon with the center point. */
			fx = x;
			fy = y;
			if (!(style & (gdChord | gdNoFill))) {
				pts[0].x = cx;
				pts[0].y = cy;
				pts[pti].x = startx = x;
				pts[pti].y = starty = y;
			}
		}
		lx = x;
		ly = y;
	}

	if (style & gdChord) {
		if (style & gdNoFill) {
			if (style & gdEdged) {
				gdImageLine (im, cx, cy, lx, ly, color);
				gdImageLine (im, cx, cy, fx, fy, color);
			}
			gdImageLine (im, fx, fy, lx, ly, color);
		} else {
			/* Filled chord: triangle first-point / last-point / center. */
			pts[0].x = fx;
			pts[0].y = fy;
			pts[1].x = lx;
			pts[1].y = ly;
			pts[2].x = cx;
			pts[2].y = cy;
			gdImageFilledPolygon (im, pts, 3, color);
		}
	} else {
		if (style & gdNoFill) {
			if (style & gdEdged) {
				gdImageLine (im, cx, cy, lx, ly, color);
				gdImageLine (im, cx, cy, fx, fy, color);
			}
		} else {
			if (e - s < 360) {
				/* The horizontal-run collapsing above may have dropped
				   the true start or end point; re-insert them so the
				   pie edges reach the exact endpoints. */
				if (pts[1].x != startx && pts[1].y == starty) {
					/* start point has been removed due to y-coord fix => insert it */
					for (i = pti; i > 1; i--) {
						pts[i].x = pts[i-1].x;
						pts[i].y = pts[i-1].y;
					}
					pts[1].x = startx;
					pts[1].y = starty;
					pti++;
				}
				if (pts[pti-1].x != endx && pts[pti-1].y == endy) {
					/* end point has been removed due to y-coord fix => insert it */
					pts[pti].x = endx;
					pts[pti].y = endy;
					pti++;
				}
			}
			/* Close the pie back to the center and fill. */
			pts[pti].x = cx;
			pts[pti].y = cy;
			gdImageFilledPolygon(im, pts, pti+1, color);
		}
	}
}
/*
Function: gdImageEllipse
*/
/* Draw the outline of an ellipse centered at (mx,my) with width w and
   height h, using an integer midpoint algorithm.  Each step plots the
   four symmetric points (mx1,my1) (mx1,my2) (mx2,my1) (mx2,my2). */
BGD_DECLARE(void) gdImageEllipse(gdImagePtr im, int mx, int my, int w, int h, int c)
{
	int x=0,mx1=0,mx2=0,my1=0,my2=0;
	long aq,bq,dx,dy,r,rx,ry,a,b;	/* semi-axes and decision terms */

	a=w>>1;
	b=h>>1;
	/* Plot the two extreme points on the horizontal axis. */
	gdImageSetPixel(im,mx+a, my, c);
	gdImageSetPixel(im,mx-a, my, c);
	mx1 = mx-a;
	my1 = my;
	mx2 = mx+a;
	my2 = my;

	aq = a * a;
	bq = b * b;
	dx = aq << 1;
	dy = bq << 1;
	r  = a * bq;	/* decision variable */
	rx = r << 1;
	ry = 0;
	x = a;
	while (x > 0) {
		if (r > 0) {
			/* Step vertically (toward the top/bottom). */
			my1++;
			my2--;
			ry +=dx;
			r  -=ry;
		}
		if (r <= 0) {
			/* Step horizontally (toward the center). */
			x--;
			mx1++;
			mx2--;
			rx -=dy;
			r  +=rx;
		}
		gdImageSetPixel(im,mx1, my1, c);
		gdImageSetPixel(im,mx1, my2, c);
		gdImageSetPixel(im,mx2, my1, c);
		gdImageSetPixel(im,mx2, my2, c);
	}
}
/*
Function: gdImageFilledEllipse
*/
/* Draw a filled ellipse centered at (mx,my) with width w and height h.
   Same midpoint stepping as gdImageEllipse, but each new scan-line pair
   is filled with a horizontal run; old_y2 prevents re-filling the same
   pair when only a horizontal step was taken. */
BGD_DECLARE(void) gdImageFilledEllipse (gdImagePtr im, int mx, int my, int w, int h, int c)
{
	int x=0,mx1=0,mx2=0,my1=0,my2=0;
	long aq,bq,dx,dy,r,rx,ry,a,b;
	int i;
	int old_y2;	/* last bottom scan line that was filled */

	a=w>>1;
	b=h>>1;

	/* Fill the central scan line first. */
	for (x = mx-a; x <= mx+a; x++) {
		gdImageSetPixel(im, x, my, c);
	}

	mx1 = mx-a;
	my1 = my;
	mx2 = mx+a;
	my2 = my;

	aq = a * a;
	bq = b * b;
	dx = aq << 1;
	dy = bq << 1;
	r  = a * bq;	/* decision variable */
	rx = r << 1;
	ry = 0;
	x = a;
	old_y2=-2;	/* sentinel: no line filled yet */
	while (x > 0) {
		if (r > 0) {
			/* Step vertically: a new scan-line pair appears. */
			my1++;
			my2--;
			ry +=dx;
			r  -=ry;
		}
		if (r <= 0) {
			/* Step horizontally: the runs get shorter. */
			x--;
			mx1++;
			mx2--;
			rx -=dy;
			r  +=rx;
		}
		if(old_y2!=my2) {
			/* Fill the mirrored top and bottom runs once each. */
			for(i=mx1; i<=mx2; i++) {
				gdImageSetPixel(im,i,my2,c);
				gdImageSetPixel(im,i,my1,c);
			}
		}
		old_y2 = my2;
	}
}
/*
Function: gdImageFillToBorder
*/
/* Flood-fill with `color` starting at (x, y), stopping at pixels of the
   solid `border` color.  Works scan line by scan line, recursing into
   the lines above and below for each unfilled run.
   NOTE(review): recursion depth grows with region complexity - deeply
   nested regions could exhaust the C stack; confirm acceptable for
   expected inputs. */
BGD_DECLARE(void) gdImageFillToBorder (gdImagePtr im, int x, int y, int border, int color)
{
	int lastBorder;
	/* Seek left */
	int leftLimit, rightLimit;
	int i;
	int restoreAlphaBleding;	/* saved alphaBlendingFlag ("Bleding" is historical) */

	if (border < 0 || color < 0) {
		/* Refuse to fill to a non-solid border */
		return;
	}
	if (!im->trueColor) {
		/* Palette image: both colors must be valid palette indices. */
		if ((color > (im->colorsTotal - 1)) || (border > (im->colorsTotal - 1))) {
			return;
		}
	}

	leftLimit = (-1);

	/* Disable blending so raw color values are written; restored below. */
	restoreAlphaBleding = im->alphaBlendingFlag;
	im->alphaBlendingFlag = 0;

	/* Clamp the seed into the image. */
	if (x >= im->sx) {
		x = im->sx - 1;
	} else if (x < 0) {
		x = 0;
	}
	if (y >= im->sy) {
		y = im->sy - 1;
	} else if (y < 0) {
		y = 0;
	}

	/* Fill leftward from the seed until the border is hit. */
	for (i = x; (i >= 0); i--) {
		if (gdImageGetPixel (im, i, y) == border) {
			break;
		}
		gdImageSetPixel (im, i, y, color);
		leftLimit = i;
	}
	if (leftLimit == (-1)) {
		/* Seed itself was on the border: nothing to fill. */
		im->alphaBlendingFlag = restoreAlphaBleding;
		return;
	}
	/* Seek right */
	rightLimit = x;
	for (i = (x + 1); (i < im->sx); i++) {
		if (gdImageGetPixel (im, i, y) == border) {
			break;
		}
		gdImageSetPixel (im, i, y, color);
		rightLimit = i;
	}
	/* Look at lines above and below and start paints */
	/* Above */
	if (y > 0) {
		lastBorder = 1;
		for (i = leftLimit; (i <= rightLimit); i++) {
			int c;
			c = gdImageGetPixel (im, i, y - 1);
			if (lastBorder) {
				/* First fillable pixel after a border/filled run:
				   recurse once per run, not once per pixel. */
				if ((c != border) && (c != color)) {
					gdImageFillToBorder (im, i, y - 1, border, color);
					lastBorder = 0;
				}
			} else if ((c == border) || (c == color)) {
				lastBorder = 1;
			}
		}
	}
	/* Below */
	if (y < ((im->sy) - 1)) {
		lastBorder = 1;
		for (i = leftLimit; (i <= rightLimit); i++) {
			int c = gdImageGetPixel (im, i, y + 1);
			if (lastBorder) {
				if ((c != border) && (c != color)) {
					gdImageFillToBorder (im, i, y + 1, border, color);
					lastBorder = 0;
				}
			} else if ((c == border) || (c == color)) {
				lastBorder = 1;
			}
		}
	}
	im->alphaBlendingFlag = restoreAlphaBleding;
}
/*
* set the pixel at (x,y) and its 4-connected neighbors
* with the same pixel value to the new pixel value nc (new color).
* A 4-connected neighbor: pixel above, below, left, or right of a pixel.
* ideas from comp.graphics discussions.
* For tiled fill, the use of a flag buffer is mandatory. As the tile image can
* contain the same color as the color to fill. To do not bloat normal filling
* code I added a 2nd private function.
*/
/* Return the color value to write at destination (x, y) during a tiled
   fill: the tile pixel mapped onto this coordinate, converted to the
   destination image's color model.  Returns -1 if no tile is set; the
   tile's transparent color maps to the destination's transparent color. */
static int gdImageTileGet (gdImagePtr im, int x, int y)
{
	int srcx, srcy;
	int tileColor,p;
	if (!im->tile) {
		return -1;
	}
	srcx = x % gdImageSX(im->tile);
	srcy = y % gdImageSY(im->tile);
	p = gdImageGetPixel(im->tile, srcx, srcy);

	if (p == im->tile->transparent) {
		tileColor = im->transparent;
	} else if (im->trueColor) {
		if (im->tile->trueColor) {
			tileColor = p;
		} else {
			/* Palette tile -> truecolor destination: expand the entry. */
			tileColor = gdTrueColorAlpha( gdImageRed(im->tile,p), gdImageGreen(im->tile,p), gdImageBlue (im->tile,p), gdImageAlpha (im->tile,p));
		}
	} else {
		/* Palette destination: resolve the tile color into the palette. */
		if (im->tile->trueColor) {
			tileColor = gdImageColorResolveAlpha(im, gdTrueColorGetRed (p), gdTrueColorGetGreen (p), gdTrueColorGetBlue (p), gdTrueColorGetAlpha (p));
		} else {
			tileColor = gdImageColorResolveAlpha(im, gdImageRed (im->tile,p), gdImageGreen (im->tile,p), gdImageBlue (im->tile,p), gdImageAlpha (im->tile,p));
		}
	}
	return tileColor;
}
/* horizontal segment of scan line y */
struct seg {
	int y, xl, xr, dy;	/* scan line, left/right extent, direction to parent (+1/-1) */
};

/* max depth of stack */
#define FILL_MAX ((int)(im->sy*im->sx)/4)
/* Push a segment if there is stack room and the target scan line (Y+DY)
   is still inside the image; expects `sp`, `stack`, `wy2` in scope. */
#define FILL_PUSH(Y, XL, XR, DY) \
	if (sp<stack+FILL_MAX && Y+(DY)>=0 && Y+(DY)<wy2) \
	{sp->y = Y; sp->xl = XL; sp->xr = XR; sp->dy = DY; sp++;}
/* Pop a segment, advancing Y one step in the stored direction DY. */
#define FILL_POP(Y, XL, XR, DY) \
	{sp--; Y = sp->y+(DY = sp->dy); XL = sp->xl; XR = sp->xr;}
static void _gdImageFillTiled(gdImagePtr im, int x, int y, int nc);
/*
Function: gdImageFill
*/
/* Flood-fill the 4-connected region around (x, y) with color nc, using
   an explicit segment stack (no recursion).  Alpha blending is disabled
   for the duration so raw pixel values are written; the original flag is
   restored on every exit path. */
BGD_DECLARE(void) gdImageFill(gdImagePtr im, int x, int y, int nc)
{
	int l, x1, x2, dy;
	int oc;   /* old pixel value */
	int wx2,wy2;
	int alphablending_bak;

	/* stack of filled segments */
	/* struct seg stack[FILL_MAX],*sp = stack; */
	struct seg *stack;
	struct seg *sp;

	if (!im->trueColor && nc > (im->colorsTotal - 1)) {
		return;		/* palette index out of range */
	}

	alphablending_bak = im->alphaBlendingFlag;
	im->alphaBlendingFlag = 0;

	if (nc==gdTiled) {
		/* Tiled fills need a visited map; separate implementation. */
		_gdImageFillTiled(im,x,y,nc);
		im->alphaBlendingFlag = alphablending_bak;
		return;
	}

	wx2=im->sx;
	wy2=im->sy;
	oc = gdImageGetPixel(im, x, y);
	/* Valid coordinates are 0..sx-1 / 0..sy-1; an out-of-range seed,
	   or a seed already holding the fill color, is a no-op.  (Was
	   x>wx2 / y>wy2, which let a seed at exactly sx or sy read the
	   out-of-bounds value 0 and fill every 0-valued pixel.) */
	if (oc==nc || x<0 || x>=wx2 || y<0 || y>=wy2) {
		goto done;
	}

	/* Do not use the 4 neighbors implementation with
	 * small images
	 */
	if (im->sx < 4) {
		int ix = x, iy = y, c;
		do {
			do {
				c = gdImageGetPixel(im, ix, iy);
				if (c != oc) {
					goto done;
				}
				gdImageSetPixel(im, ix, iy, nc);
			} while(ix++ < (im->sx -1));
			ix = x;
		} while(iy++ < (im->sy -1));
		goto done;
	}

	/* On any failure below, jump to done so the blending flag is
	   restored (plain `return` here used to leave it cleared). */
	if(overflow2(im->sy, im->sx)) {
		goto done;
	}
	if(overflow2(sizeof(struct seg), ((im->sy * im->sx) / 4))) {
		goto done;
	}
	stack = (struct seg *)gdMalloc(sizeof(struct seg) * ((int)(im->sy*im->sx)/4));
	if (!stack) {
		goto done;
	}
	sp = stack;

	/* required! */
	FILL_PUSH(y,x,x,1);
	/* seed segment (popped 1st) */
	FILL_PUSH(y+1, x, x, -1);
	while (sp>stack) {
		FILL_POP(y, x1, x2, dy);
		/* Extend left from x1 while pixels still match the old color. */
		for (x=x1; x>=0 && gdImageGetPixel(im,x, y)==oc; x--) {
			gdImageSetPixel(im,x, y, nc);
		}
		if (x>=x1) {
			goto skip;
		}
		l = x+1;

		/* leak on left? */
		if (l<x1) {
			FILL_PUSH(y, l, x1-1, -dy);
		}
		x = x1+1;
		do {
			/* Extend right, filling as we go. */
			for (; x<=wx2 && gdImageGetPixel(im,x, y)==oc; x++) {
				gdImageSetPixel(im, x, y, nc);
			}
			FILL_PUSH(y, l, x-1, dy);
			/* leak on right? */
			if (x>x2+1) {
				FILL_PUSH(y, x2+1, x-1, -dy);
			}
skip:
			for (x++; x<=x2 && (gdImageGetPixel(im, x, y)!=oc); x++);
			l = x;
		} while (x<=x2);
	}

	gdFree(stack);

done:
	im->alphaBlendingFlag = alphablending_bak;
}
/* Tiled flood fill: same segment-stack algorithm as gdImageFill, but
   the written color comes from gdImageTileGet per pixel, and a byte map
   (pts) marks visited pixels - necessary because the tile may contain
   the very color being replaced.  `nc` is ignored on entry (gdTiled)
   and reused as the per-pixel tile color. */
static void _gdImageFillTiled(gdImagePtr im, int x, int y, int nc)
{
	int l, x1, x2, dy;
	int oc;   /* old pixel value */
	int wx2,wy2;
	/* stack of filled segments */
	struct seg *stack;
	struct seg *sp;
	char *pts;	/* visited map, one byte per pixel, indexed y + x*wy2 */

	if (!im->tile) {
		return;
	}

	wx2=im->sx;
	wy2=im->sy;

	if(overflow2(im->sy, im->sx)) {
		return;
	}

	if(overflow2(sizeof(struct seg), ((im->sy * im->sx) / 4))) {
		return;
	}

	pts = (char *) gdCalloc(im->sy * im->sx, sizeof(char));
	if (!pts) {
		return;
	}

	stack = (struct seg *)gdMalloc(sizeof(struct seg) * ((int)(im->sy*im->sx)/4));
	if (!stack) {
		gdFree(pts);
		return;
	}
	sp = stack;

	oc = gdImageGetPixel(im, x, y);

	/* required! */
	FILL_PUSH(y,x,x,1);
	/* seed segment (popped 1st) */
	FILL_PUSH(y+1, x, x, -1);
	while (sp>stack) {
		FILL_POP(y, x1, x2, dy);
		/* Extend left while unvisited pixels still match the old color. */
		for (x=x1; x>=0 && (!pts[y + x*wy2] && gdImageGetPixel(im,x,y)==oc); x--) {
			nc = gdImageTileGet(im,x,y);
			pts[y + x*wy2]=1;
			gdImageSetPixel(im,x, y, nc);
		}
		if (x>=x1) {
			goto skip;
		}
		l = x+1;

		/* leak on left? */
		if (l<x1) {
			FILL_PUSH(y, l, x1-1, -dy);
		}
		x = x1+1;
		do {
			/* Extend right, painting the matching tile pixel each step. */
			for (; x<wx2 && (!pts[y + x*wy2] && gdImageGetPixel(im,x, y)==oc) ; x++) {
				if (pts[y + x*wy2]) {
					/* we should never be here */
					break;
				}
				nc = gdImageTileGet(im,x,y);
				pts[y + x*wy2]=1;
				gdImageSetPixel(im, x, y, nc);
			}
			FILL_PUSH(y, l, x-1, dy);
			/* leak on right? */
			if (x>x2+1) {
				FILL_PUSH(y, x2+1, x-1, -dy);
			}
skip:
			for (x++; x<=x2 && (pts[y + x*wy2] || gdImageGetPixel(im,x, y)!=oc); x++);
			l = x;
		} while (x<=x2);
	}

	gdFree(pts);
	gdFree(stack);
}
/**
* Function: gdImageRectangle
*
* Draws a rectangle.
*
* Parameters:
* im - The image.
* x1 - The x-coordinate of one of the corners.
* y1 - The y-coordinate of one of the corners.
* x2 - The x-coordinate of another corner.
* y2 - The y-coordinate of another corner.
* color - The color.
*
* See also:
* - <gdImageFilledRectangle>
*/
BGD_DECLARE(void) gdImageRectangle (gdImagePtr im, int x1, int y1, int x2, int y2, int color)
{
	int thick = im->thick;

	/* Degenerate rectangle: a single pixel. */
	if (x1 == x2 && y1 == y2 && thick == 1) {
		gdImageSetPixel(im, x1, y1, color);
		return;
	}

	/* Normalize so (x1,y1) is the upper-left corner. */
	if (y2 < y1) {
		int t = y1;
		y1 = y2;
		y2 = t;
	}

	if (x2 < x1) {
		int t = x1;
		x1 = x2;
		x2 = t;
	}

	if (thick > 1) {
		/* Thick border: draw four filled bands around the outline,
		   expanded outward by half the thickness on each side. */
		int cx, cy, x1ul, y1ul, x2lr, y2lr;
		int half = thick >> 1;

		x1ul = x1 - half;
		y1ul = y1 - half;

		x2lr = x2 + half;
		y2lr = y2 + half;

		/* Top band. */
		cy = y1ul + thick;
		while (cy-- > y1ul) {
			cx = x1ul - 1;
			while (cx++ < x2lr) {
				gdImageSetPixel(im, cx, cy, color);
			}
		}

		/* Bottom band. */
		cy = y2lr - thick;
		while (cy++ < y2lr) {
			cx = x1ul - 1;
			while (cx++ < x2lr) {
				gdImageSetPixel(im, cx, cy, color);
			}
		}

		/* Left band (between the top and bottom bands). */
		cy = y1ul + thick - 1;
		while (cy++ < y2lr -thick) {
			cx = x1ul - 1;
			while (cx++ < x1ul + thick) {
				gdImageSetPixel(im, cx, cy, color);
			}
		}

		/* Right band. */
		cy = y1ul + thick - 1;
		while (cy++ < y2lr -thick) {
			cx = x2lr - thick - 1;
			while (cx++ < x2lr) {
				gdImageSetPixel(im, cx, cy, color);
			}
		}

		return;
	} else {
		if (x1 == x2 || y1 == y2) {
			/* Collapsed to a line. */
			gdImageLine(im, x1, y1, x2, y2, color);
		} else {
			/* Four thin edges; verticals are shortened by one pixel
			   at each end so corners are not drawn twice. */
			gdImageLine(im, x1, y1, x2, y1, color);
			gdImageLine(im, x1, y2, x2, y2, color);
			gdImageLine(im, x1, y1 + 1, x1, y2 - 1, color);
			gdImageLine(im, x2, y1 + 1, x2, y2 - 1, color);
		}
	}
}
/* Fill a rectangle, writing pixels column by column (x outer, y inner).
   Coordinates may be given in either order and are clipped to the image. */
static void _gdImageFilledHRectangle (gdImagePtr im, int x1, int y1, int x2, int y2,
				      int color)
{
	int col, row, tmp;

	if (x1 == x2 && y1 == y2) {
		gdImageSetPixel(im, x1, y1, color);
		return;
	}

	/* Normalize so (x1,y1) is the upper-left corner. */
	if (x1 > x2) {
		tmp = x1;
		x1 = x2;
		x2 = tmp;
	}
	if (y1 > y2) {
		tmp = y1;
		y1 = y2;
		y2 = tmp;
	}

	/* Clip to the image bounds. */
	if (x1 < 0) {
		x1 = 0;
	}
	if (x2 >= gdImageSX(im)) {
		x2 = gdImageSX(im) - 1;
	}
	if (y1 < 0) {
		y1 = 0;
	}
	if (y2 >= gdImageSY(im)) {
		y2 = gdImageSY(im) - 1;
	}

	for (col = x1; col <= x2; col++) {
		for (row = y1; row <= y2; row++) {
			gdImageSetPixel (im, col, row, color);
		}
	}
}
/* Fill a rectangle, writing pixels row by row (y outer, x inner).
   Coordinates may be given in either order and are clipped to the image. */
static void _gdImageFilledVRectangle (gdImagePtr im, int x1, int y1, int x2, int y2,
				      int color)
{
	int col, row, tmp;

	if (x1 == x2 && y1 == y2) {
		gdImageSetPixel(im, x1, y1, color);
		return;
	}

	/* Normalize so (x1,y1) is the upper-left corner. */
	if (x1 > x2) {
		tmp = x1;
		x1 = x2;
		x2 = tmp;
	}
	if (y1 > y2) {
		tmp = y1;
		y1 = y2;
		y2 = tmp;
	}

	/* Clip to the image bounds. */
	if (x1 < 0) {
		x1 = 0;
	}
	if (x2 >= gdImageSX(im)) {
		x2 = gdImageSX(im) - 1;
	}
	if (y1 < 0) {
		y1 = 0;
	}
	if (y2 >= gdImageSY(im)) {
		y2 = gdImageSY(im) - 1;
	}

	for (row = y1; row <= y2; row++) {
		for (col = x1; col <= x2; col++) {
			gdImageSetPixel (im, col, row, color);
		}
	}
}
/*
Function: gdImageFilledRectangle
*/
/* Public filled-rectangle entry point: delegates to the row-major
   (vertical-scan) implementation. */
BGD_DECLARE(void) gdImageFilledRectangle (gdImagePtr im, int x1, int y1, int x2, int y2,
					  int color)
{
	_gdImageFilledVRectangle(im, x1, y1, x2, y2, color);
}
/**
* Group: Cloning and Copying
*/
/**
* Function: gdImageClone
*
* Clones an image
*
* Creates an exact duplicate of the given image.
*
* Parameters:
* src - The source image.
*
* Returns:
* The cloned image on success, NULL on failure.
*/
BGD_DECLARE(gdImagePtr) gdImageClone (gdImagePtr src) {
	gdImagePtr dst;
	int i, x;

	if (src->trueColor) {
		dst = gdImageCreateTrueColor(src->sx , src->sy);
	} else {
		dst = gdImageCreate(src->sx , src->sy);
	}

	if (dst == NULL) {
		return NULL;
	}

	if (src->trueColor == 0) {
		/* Palette image: copy the palette and the 8-bit pixel rows. */
		dst->colorsTotal = src->colorsTotal;
		for (i = 0; i < gdMaxColors; i++) {
			dst->red[i]   = src->red[i];
			dst->green[i] = src->green[i];
			dst->blue[i]  = src->blue[i];
			dst->alpha[i] = src->alpha[i];
			dst->open[i]  = src->open[i];
		}
		for (i = 0; i < src->sy; i++) {
			for (x = 0; x < src->sx; x++) {
				dst->pixels[i][x] = src->pixels[i][x];
			}
		}
	} else {
		/* Truecolor image: copy the packed ARGB rows. */
		for (i = 0; i < src->sy; i++) {
			for (x = 0; x < src->sx; x++) {
				dst->tpixels[i][x] = src->tpixels[i][x];
			}
		}
	}

	/* Copy drawing state and image attributes. */
	dst->interlace = src->interlace;
	dst->alphaBlendingFlag = src->alphaBlendingFlag;
	dst->saveAlphaFlag = src->saveAlphaFlag;
	dst->AA = src->AA;
	dst->AA_color = src->AA_color;
	dst->AA_dont_blend = src->AA_dont_blend;
	dst->cx1 = src->cx1;
	dst->cy1 = src->cy1;
	dst->cx2 = src->cx2;
	dst->cy2 = src->cy2;
	dst->res_x = src->res_x;
	dst->res_y = src->res_y;
	dst->paletteQuantizationMethod = src->paletteQuantizationMethod;
	dst->paletteQuantizationSpeed = src->paletteQuantizationSpeed;
	/* was assigned twice in a row; duplicate removed */
	dst->paletteQuantizationMinQuality = src->paletteQuantizationMinQuality;
	dst->interpolation_id = src->interpolation_id;
	dst->interpolation = src->interpolation;

	if (src->brush) {
		/* NOTE(review): may be NULL on allocation failure, leaving a
		   brushless clone - confirm callers tolerate this. */
		dst->brush = gdImageClone(src->brush);
	}
	if (src->tile) {
		dst->tile = gdImageClone(src->tile);
	}
	if (src->style) {
		gdImageSetStyle(dst, src->style, src->styleLength);
		dst->stylePos = src->stylePos;
	}

	for (i = 0; i < gdMaxColors; i++) {
		dst->brushColorMap[i] = src->brushColorMap[i];
		dst->tileColorMap[i]  = src->tileColorMap[i];
	}

	/* polyInts is scratch workspace for polygon fills; only copy it if
	   the fresh image actually has a buffer (unconditional copying
	   dereferenced a NULL dst->polyInts whenever src had drawn a
	   filled polygon). */
	if (src->polyAllocated > 0 && dst->polyInts != NULL) {
		dst->polyAllocated = src->polyAllocated;
		for (i = 0; i < src->polyAllocated; i++) {
			dst->polyInts[i] = src->polyInts[i];
		}
	}

	return dst;
}
/**
* Function: gdImageCopy
*
* Copy an area of an image to another image
*
* Parameters:
* dst - The destination image.
* src - The source image.
* dstX - The x-coordinate of the upper left corner to copy to.
* dstY - The y-coordinate of the upper left corner to copy to.
* srcX - The x-coordinate of the upper left corner to copy from.
* srcY - The y-coordinate of the upper left corner to copy from.
* w - The width of the area to copy.
* h - The height of the area to copy.
*
* See also:
* - <gdImageCopyMerge>
* - <gdImageCopyMergeGray>
*/
BGD_DECLARE(void) gdImageCopy (gdImagePtr dst, gdImagePtr src, int dstX, int dstY, int srcX,
			       int srcY, int w, int h)
{
	int c;
	int x, y;
	int tox, toy;	/* current destination coordinates */
	int i;
	int colorMap[gdMaxColors];	/* src palette index -> dst palette index cache */

	if (dst->trueColor) {
		/* 2.0: much easier when the destination is truecolor. */
		/* 2.0.10: needs a transparent-index check that is still valid if
		 * the source is not truecolor. Thanks to Frank Warmerdam.
		 */

		if (src->trueColor) {
			for (y = 0; (y < h); y++) {
				for (x = 0; (x < w); x++) {
					int c = gdImageGetTrueColorPixel (src, srcX + x, srcY + y);
					if (c != src->transparent) {
						gdImageSetPixel (dst, dstX + x, dstY + y, c);
					}
				}
			}
		} else {
			/* source is palette based */
			for (y = 0; (y < h); y++) {
				for (x = 0; (x < w); x++) {
					int c = gdImageGetPixel (src, srcX + x, srcY + y);
					if (c != src->transparent) {
						/* Expand the palette entry on the fly. */
						gdImageSetPixel(dst, dstX + x, dstY + y, gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]));
					}
				}
			}
		}
		return;
	}

	/* Palette destination: map source colors into dst's palette,
	   caching palette-to-palette mappings in colorMap. */
	for (i = 0; (i < gdMaxColors); i++) {
		colorMap[i] = (-1);
	}
	toy = dstY;
	for (y = srcY; (y < (srcY + h)); y++) {
		tox = dstX;
		for (x = srcX; (x < (srcX + w)); x++) {
			int nc;
			int mapTo;
			c = gdImageGetPixel (src, x, y);
			/* Added 7/24/95: support transparent copies */
			if (gdImageGetTransparent (src) == c) {
				tox++;
				continue;
			}
			/* Have we established a mapping for this color? */
			if (src->trueColor) {
				/* 2.05: remap to the palette available in the
				   destination image. This is slow and
				   works badly, but it beats crashing! Thanks
				   to Padhrig McCarthy. */
				mapTo = gdImageColorResolveAlpha (dst,
								  gdTrueColorGetRed (c),
								  gdTrueColorGetGreen (c),
								  gdTrueColorGetBlue (c),
								  gdTrueColorGetAlpha (c));
			} else if (colorMap[c] == (-1)) {
				/* If it's the same image, mapping is trivial */
				if (dst == src) {
					nc = c;
				} else {
					/* Get best match possible. This
					   function never returns error. */
					nc = gdImageColorResolveAlpha (dst,
								       src->red[c], src->green[c],
								       src->blue[c], src->alpha[c]);
				}
				colorMap[c] = nc;
				mapTo = colorMap[c];
			} else {
				mapTo = colorMap[c];
			}
			gdImageSetPixel (dst, tox, toy, mapTo);
			tox++;
		}
		toy++;
	}
}
/**
* Function: gdImageCopyMerge
*
* Copy an area of an image to another image ignoring alpha
*
* The source area will be copied to the destination are by merging the pixels.
*
* Note:
* This function is a substitute for real alpha channel operations,
* so it doesn't pay attention to the alpha channel.
*
* Parameters:
* dst - The destination image.
* src - The source image.
* dstX - The x-coordinate of the upper left corner to copy to.
* dstY - The y-coordinate of the upper left corner to copy to.
* srcX - The x-coordinate of the upper left corner to copy from.
* srcY - The y-coordinate of the upper left corner to copy from.
* w - The width of the area to copy.
* h - The height of the area to copy.
* pct - The percentage in range 0..100.
*
* See also:
* - <gdImageCopy>
* - <gdImageCopyMergeGray>
*/
BGD_DECLARE(void) gdImageCopyMerge (gdImagePtr dst, gdImagePtr src, int dstX, int dstY,
				    int srcX, int srcY, int w, int h, int pct)
{
	int c, dc;
	int x, y;
	int tox, toy;	/* current destination coordinates */
	int ncR, ncG, ncB;	/* blended channel values */
	toy = dstY;
	for (y = srcY; (y < (srcY + h)); y++) {
		tox = dstX;
		for (x = srcX; (x < (srcX + w)); x++) {
			int nc;
			c = gdImageGetPixel (src, x, y);
			/* Added 7/24/95: support transparent copies */
			if (gdImageGetTransparent (src) == c) {
				tox++;
				continue;
			}
			/* If it's the same image, mapping is trivial */
			if (dst == src) {
				nc = c;
			} else {
				/* Weighted per-channel average of source and
				   destination: pct% source, (100-pct)% destination. */
				dc = gdImageGetPixel (dst, tox, toy);

				ncR = gdImageRed (src, c) * (pct / 100.0)
				      + gdImageRed (dst, dc) * ((100 - pct) / 100.0);
				ncG = gdImageGreen (src, c) * (pct / 100.0)
				      + gdImageGreen (dst, dc) * ((100 - pct) / 100.0);
				ncB = gdImageBlue (src, c) * (pct / 100.0)
				      + gdImageBlue (dst, dc) * ((100 - pct) / 100.0);

				/* Find a reasonable color */
				nc = gdImageColorResolve (dst, ncR, ncG, ncB);
			}
			gdImageSetPixel (dst, tox, toy, nc);
			tox++;
		}
		toy++;
	}
}
/**
* Function: gdImageCopyMergeGray
*
* Copy an area of an image to another image ignoring alpha
*
* The source area will be copied to the grayscaled destination area by merging
* the pixels.
*
* Note:
* This function is a substitute for real alpha channel operations,
* so it doesn't pay attention to the alpha channel.
*
* Parameters:
* dst - The destination image.
* src - The source image.
* dstX - The x-coordinate of the upper left corner to copy to.
* dstY - The y-coordinate of the upper left corner to copy to.
* srcX - The x-coordinate of the upper left corner to copy from.
* srcY - The y-coordinate of the upper left corner to copy from.
* w - The width of the area to copy.
* h - The height of the area to copy.
* pct - The percentage of the source color intensity in range 0..100.
*
* See also:
* - <gdImageCopy>
* - <gdImageCopyMerge>
*/
BGD_DECLARE(void) gdImageCopyMergeGray (gdImagePtr dst, gdImagePtr src, int dstX, int dstY,
					int srcX, int srcY, int w, int h, int pct)
{
	int c, dc;
	int x, y;
	int tox, toy;	/* current destination coordinates */
	int ncR, ncG, ncB;	/* blended channel values */
	float g;	/* luminance of the destination pixel */
	toy = dstY;
	for (y = srcY; (y < (srcY + h)); y++) {
		tox = dstX;
		for (x = srcX; (x < (srcX + w)); x++) {
			int nc;
			c = gdImageGetPixel (src, x, y);
			/* Added 7/24/95: support transparent copies */
			if (gdImageGetTransparent (src) == c) {
				tox++;
				continue;
			}
			/*
			 * If it's the same image, mapping is NOT trivial since we
			 * merge with greyscale target, but if pct is 100, the grey
			 * value is not used, so it becomes trivial. pjw 2.0.12.
			 */
			if (dst == src && pct == 100) {
				nc = c;
			} else {
				/* Blend the source color with the destination's
				   gray value (ITU-R BT.601 luma weights). */
				dc = gdImageGetPixel (dst, tox, toy);
				g = 0.29900 * gdImageRed(dst, dc)
				    + 0.58700 * gdImageGreen(dst, dc) + 0.11400 * gdImageBlue(dst, dc);

				ncR = gdImageRed (src, c) * (pct / 100.0)
				      + g * ((100 - pct) / 100.0);
				ncG = gdImageGreen (src, c) * (pct / 100.0)
				      + g * ((100 - pct) / 100.0);
				ncB = gdImageBlue (src, c) * (pct / 100.0)
				      + g * ((100 - pct) / 100.0);

				/* First look for an exact match */
				nc = gdImageColorExact (dst, ncR, ncG, ncB);
				if (nc == (-1)) {
					/* No, so try to allocate it */
					nc = gdImageColorAllocate (dst, ncR, ncG, ncB);
					/* If we're out of colors, go for the
					   closest color */
					if (nc == (-1)) {
						nc = gdImageColorClosest (dst, ncR, ncG, ncB);
					}
				}
			}
			gdImageSetPixel (dst, tox, toy, nc);
			tox++;
		}
		toy++;
	}
}
/**
* Function: gdImageCopyResized
*
* Copy a resized area from an image to another image
*
* If the source and destination area differ in size, the area will be resized
* using nearest-neighbor interpolation.
*
* Parameters:
* dst - The destination image.
* src - The source image.
* dstX - The x-coordinate of the upper left corner to copy to.
* dstY - The y-coordinate of the upper left corner to copy to.
* srcX - The x-coordinate of the upper left corner to copy from.
* srcY - The y-coordinate of the upper left corner to copy from.
* dstW - The width of the area to copy to.
* dstH - The height of the area to copy to.
* srcW - The width of the area to copy from.
* srcH - The height of the area to copy from.
*
* See also:
* - <gdImageCopyResampled>
* - <gdImageScale>
*/
BGD_DECLARE(void) gdImageCopyResized (gdImagePtr dst, gdImagePtr src, int dstX, int dstY,
		int srcX, int srcY, int dstW, int dstH, int srcW,
		int srcH)
{
	/* Nearest-neighbor stretch blit: every source pixel (x,y) is
	   replicated stx[x-srcX] times horizontally and sty[y-srcY] times
	   vertically, so shrinking drops pixels and enlarging repeats them. */
	int c;
	int x, y;
	int tox, toy;
	int ydest;
	int i;
	int colorMap[gdMaxColors];	/* palette-src -> palette-dst cache; -1 = unset */
	/* Stretch vectors */
	int *stx;
	int *sty;
	/* We only need to use floating point to determine the correct
	   stretch vector for one line's worth. */
	/* Guard the sizeof(int) * srcW / srcH allocations against overflow. */
	if (overflow2(sizeof (int), srcW)) {
		return;
	}
	if (overflow2(sizeof (int), srcH)) {
		return;
	}
	stx = (int *) gdMalloc (sizeof (int) * srcW);
	if (!stx) {
		return;
	}
	sty = (int *) gdMalloc (sizeof (int) * srcH);
	if (!sty) {
		gdFree(stx);
		return;
	}
	/* Fixed by Mao Morimoto 2.0.16 */
	/* Difference of successive scaled positions distributes exactly dstW
	   output columns over srcW source columns without cumulative drift. */
	for (i = 0; (i < srcW); i++) {
		stx[i] = dstW * (i + 1) / srcW - dstW * i / srcW;
	}
	for (i = 0; (i < srcH); i++) {
		sty[i] = dstH * (i + 1) / srcH - dstH * i / srcH;
	}
	for (i = 0; (i < gdMaxColors); i++) {
		colorMap[i] = (-1);
	}
	toy = dstY;
	for (y = srcY; (y < (srcY + srcH)); y++) {
		/* Emit this source row sty[] times. */
		for (ydest = 0; (ydest < sty[y - srcY]); ydest++) {
			tox = dstX;
			for (x = srcX; (x < (srcX + srcW)); x++) {
				int nc = 0;
				int mapTo;
				if (!stx[x - srcX]) {
					continue;	/* column shrunk away entirely */
				}
				if (dst->trueColor) {
					/* 2.0.9: Thorben Kundinger: Maybe the source image is not
					   a truecolor image */
					if (!src->trueColor) {
						int tmp = gdImageGetPixel (src, x, y);
						mapTo = gdImageGetTrueColorPixel (src, x, y);
						if (gdImageGetTransparent (src) == tmp) {
							/* 2.0.21, TK: not tox++ */
							tox += stx[x - srcX];
							continue;
						}
					} else {
						/* TK: old code follows */
						mapTo = gdImageGetTrueColorPixel (src, x, y);
						/* Added 7/24/95: support transparent copies */
						if (gdImageGetTransparent (src) == mapTo) {
							/* 2.0.21, TK: not tox++ */
							tox += stx[x - srcX];
							continue;
						}
					}
				} else {
					c = gdImageGetPixel (src, x, y);
					/* Added 7/24/95: support transparent copies */
					if (gdImageGetTransparent (src) == c) {
						tox += stx[x - srcX];
						continue;
					}
					if (src->trueColor) {
						/* Remap to the palette available in the
						   destination image. This is slow and
						   works badly. */
						mapTo = gdImageColorResolveAlpha (dst,
						                                  gdTrueColorGetRed (c),
						                                  gdTrueColorGetGreen
						                                  (c),
						                                  gdTrueColorGetBlue
						                                  (c),
						                                  gdTrueColorGetAlpha
						                                  (c));
					} else {
						/* Have we established a mapping for this color? */
						if (colorMap[c] == (-1)) {
							/* If it's the same image, mapping is trivial */
							if (dst == src) {
								nc = c;
							} else {
								/* Find or create the best match */
								/* 2.0.5: can't use gdTrueColorGetRed, etc with palette */
								nc = gdImageColorResolveAlpha (dst,
								                               gdImageRed (src,
								                                           c),
								                               gdImageGreen
								                               (src, c),
								                               gdImageBlue (src,
								                                            c),
								                               gdImageAlpha
								                               (src, c));
							}
							colorMap[c] = nc;
						}
						mapTo = colorMap[c];
					}
				}
				/* Replicate the mapped pixel stx[] times horizontally. */
				for (i = 0; (i < stx[x - srcX]); i++) {
					gdImageSetPixel (dst, tox, toy, mapTo);
					tox++;
				}
			}
			toy++;
		}
	}
	gdFree (stx);
	gdFree (sty);
}
/**
* Function: gdImageCopyRotated
*
* Copy a rotated area from an image to another image
*
* The area is counter-clockwise rotated using nearest-neighbor interpolation.
*
* Parameters:
* dst - The destination image.
* src - The source image.
* dstX - The x-coordinate of the center of the area to copy to.
* dstY - The y-coordinate of the center of the area to copy to.
* srcX - The x-coordinate of the upper left corner to copy from.
* srcY - The y-coordinate of the upper left corner to copy from.
* srcW - The width of the area to copy from.
* srcH - The height of the area to copy from.
* angle - The angle in degrees.
*
* See also:
* - <gdImageRotateInterpolated>
*/
BGD_DECLARE(void) gdImageCopyRotated (gdImagePtr dst,
                                      gdImagePtr src,
                                      double dstX, double dstY,
                                      int srcX, int srcY,
                                      int srcWidth, int srcHeight, int angle)
{
	/* Reverse-mapping rotation: walk every destination pixel inside the
	   bounding circle around (dstX,dstY) and rotate it BACK into source
	   space; pixels that land inside the source rectangle are copied
	   (nearest neighbor, no interpolation). */
	double dx, dy;
	double radius = sqrt (srcWidth * srcWidth + srcHeight * srcHeight);
	double aCos = cos (angle * .0174532925);	/* .0174532925 = pi/180 */
	double aSin = sin (angle * .0174532925);
	double scX = srcX + ((double) srcWidth) / 2;	/* source rect center */
	double scY = srcY + ((double) srcHeight) / 2;
	int cmap[gdMaxColors];	/* palette translation cache; -1 = unset */
	int i;
	/*
	   2.0.34: transparency preservation. The transparentness of
	   the transparent color is more important than its hue.
	 */
	if (src->transparent != -1) {
		if (dst->transparent == -1) {
			dst->transparent = src->transparent;
		}
	}
	for (i = 0; (i < gdMaxColors); i++) {
		cmap[i] = (-1);
	}
	for (dy = dstY - radius; (dy <= dstY + radius); dy++) {
		for (dx = dstX - radius; (dx <= dstX + radius); dx++) {
			/* Inverse rotation of the destination offset. */
			double sxd = (dx - dstX) * aCos - (dy - dstY) * aSin;
			double syd = (dy - dstY) * aCos + (dx - dstX) * aSin;
			int sx = sxd + scX;
			int sy = syd + scY;
			if ((sx >= srcX) && (sx < srcX + srcWidth) &&
			        (sy >= srcY) && (sy < srcY + srcHeight)) {
				int c = gdImageGetPixel (src, sx, sy);
				/* 2.0.34: transparency wins */
				if (c == src->transparent) {
					gdImageSetPixel (dst, dx, dy, dst->transparent);
				} else if (!src->trueColor) {
					/* Use a table to avoid an expensive
					   lookup on every single pixel */
					if (cmap[c] == -1) {
						cmap[c] = gdImageColorResolveAlpha (dst,
						                                    gdImageRed (src, c),
						                                    gdImageGreen (src,
						                                            c),
						                                    gdImageBlue (src,
						                                            c),
						                                    gdImageAlpha (src,
						                                            c));
					}
					gdImageSetPixel (dst, dx, dy, cmap[c]);
				} else {
					gdImageSetPixel (dst,
					                 dx, dy,
					                 gdImageColorResolveAlpha (dst,
					                         gdImageRed (src,
					                                     c),
					                         gdImageGreen
					                         (src, c),
					                         gdImageBlue (src,
					                                      c),
					                         gdImageAlpha
					                         (src, c)));
				}
			}
		}
	}
}
/* When gd 1.x was first created, floating point was to be avoided.
These days it is often faster than table lookups or integer
arithmetic. The routine below is shamelessly, gloriously
floating point. TBB */
/* 2.0.10: cast instead of floor() yields 35% performance improvement.
Thanks to John Buckman. */
#define floor2(exp) ((long) exp)
/*#define floor2(exp) floor(exp)*/
/**
* Function: gdImageCopyResampled
*
* Copy a resampled area from an image to another image
*
* If the source and destination area differ in size, the area will be resized
* using bilinear interpolation for truecolor images, and nearest-neighbor
* interpolation for palette images.
*
* Parameters:
* dst - The destination image.
* src - The source image.
* dstX - The x-coordinate of the upper left corner to copy to.
* dstY - The y-coordinate of the upper left corner to copy to.
* srcX - The x-coordinate of the upper left corner to copy from.
* srcY - The y-coordinate of the upper left corner to copy from.
* dstW - The width of the area to copy to.
* dstH - The height of the area to copy to.
* srcW - The width of the area to copy from.
* srcH - The height of the area to copy from.
*
* See also:
* - <gdImageCopyResized>
* - <gdImageScale>
*/
BGD_DECLARE(void) gdImageCopyResampled (gdImagePtr dst,
                                        gdImagePtr src,
                                        int dstX, int dstY,
                                        int srcX, int srcY,
                                        int dstW, int dstH, int srcW, int srcH)
{
	/* Bilinear-style resampling for truecolor destinations: each output
	   pixel averages all source pixels its footprint overlaps, weighting
	   each by its fractional coverage and its opacity. */
	int x, y;
	if (!dst->trueColor) {
		/* Palette destinations cannot hold averaged colors; fall back to
		   nearest-neighbor. */
		gdImageCopyResized (dst, src, dstX, dstY, srcX, srcY, dstW, dstH, srcW, srcH);
		return;
	}
	for (y = dstY; (y < dstY + dstH); y++) {
		for (x = dstX; (x < dstX + dstW); x++) {
			float sy1, sy2, sx1, sx2;
			float sx, sy;
			float spixels = 0.0;	/* total area covered */
			float red = 0.0, green = 0.0, blue = 0.0, alpha = 0.0;
			float alpha_factor, alpha_sum = 0.0, contrib_sum = 0.0;
			/* [sy1,sy2) x [sx1,sx2): source footprint of this dest pixel. */
			sy1 = ((float)(y - dstY)) * (float)srcH / (float)dstH;
			sy2 = ((float)(y + 1 - dstY)) * (float) srcH / (float) dstH;
			sy = sy1;
			do {
				float yportion;	/* vertical coverage of the current source row */
				if (floorf(sy) == floorf(sy1)) {
					/* First (possibly partial) row of the footprint. */
					yportion = 1.0 - (sy - floorf(sy));
					if (yportion > sy2 - sy1) {
						yportion = sy2 - sy1;
					}
					sy = floorf(sy);
				} else if (sy == floorf(sy2)) {
					/* Last partial row. */
					yportion = sy2 - floorf(sy2);
				} else {
					yportion = 1.0;	/* fully covered interior row */
				}
				sx1 = ((float)(x - dstX)) * (float) srcW / dstW;
				sx2 = ((float)(x + 1 - dstX)) * (float) srcW / dstW;
				sx = sx1;
				do {
					float xportion;	/* horizontal coverage of current column */
					float pcontribution;
					int p;
					if (floorf(sx) == floorf(sx1)) {
						xportion = 1.0 - (sx - floorf(sx));
						if (xportion > sx2 - sx1) {
							xportion = sx2 - sx1;
						}
						sx = floorf(sx);
					} else if (sx == floorf(sx2)) {
						xportion = sx2 - floorf(sx2);
					} else {
						xportion = 1.0;
					}
					pcontribution = xportion * yportion;
					p = gdImageGetTrueColorPixel(src, (int) sx + srcX, (int) sy + srcY);
					/* Weight color channels by opacity so transparent
					   pixels don't tint the average. */
					alpha_factor = ((gdAlphaMax - gdTrueColorGetAlpha(p))) * pcontribution;
					red += gdTrueColorGetRed (p) * alpha_factor;
					green += gdTrueColorGetGreen (p) * alpha_factor;
					blue += gdTrueColorGetBlue (p) * alpha_factor;
					alpha += gdTrueColorGetAlpha (p) * pcontribution;
					alpha_sum += alpha_factor;
					contrib_sum += pcontribution;
					spixels += xportion * yportion;
					sx += 1.0;
				}
				while (sx < sx2);
				sy += 1.0f;
			}
			while (sy < sy2);
			if (spixels != 0.0) {
				red /= spixels;
				green /= spixels;
				blue /= spixels;
				alpha /= spixels;
			}
			if ( alpha_sum != 0.0) {
				if( contrib_sum != 0.0) {
					alpha_sum /= contrib_sum;
				}
				/* Undo the opacity weighting applied above. */
				red /= alpha_sum;
				green /= alpha_sum;
				blue /= alpha_sum;
			}
			/* Clamping to allow for rounding errors above */
			if (red > 255.0) {
				red = 255.0;
			}
			if (green > 255.0) {
				green = 255.0;
			}
			if (blue > 255.0f) {
				blue = 255.0;
			}
			if (alpha > gdAlphaMax) {
				alpha = gdAlphaMax;
			}
			gdImageSetPixel(dst, x, y, gdTrueColorAlpha ((int) red, (int) green, (int) blue, (int) alpha));
		}
	}
}
/**
* Group: Polygons
*/
/**
* Function: gdImagePolygon
*
* Draws a closed polygon
*
* Parameters:
* im - The image.
* p - The vertices as array of <gdPoint>s.
* n - The number of vertices.
* c - The color.
*
* See also:
* - <gdImageOpenPolygon>
* - <gdImageFilledPolygon>
*/
BGD_DECLARE(void) gdImagePolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
	/* Draw a closed polygon: the closing edge (last vertex back to the
	   first) plus the open polyline over all n vertices. */
	if (n <= 0) {
		return;
	}

	gdImageLine (im, p[0].x, p[0].y, p[n - 1].x, p[n - 1].y, c);
	gdImageOpenPolygon (im, p, n, c);
}
/**
* Function: gdImageOpenPolygon
*
* Draws an open polygon
*
* Parameters:
* im - The image.
* p - The vertices as array of <gdPoint>s.
* n - The number of vertices.
* c - The color
*
* See also:
* - <gdImagePolygon>
*/
BGD_DECLARE(void) gdImageOpenPolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
	/* Draw an open polyline: a line segment between each pair of
	   consecutive vertices, without closing the shape. */
	int i;

	if (n <= 0) {
		return;
	}
	for (i = 1; i < n; i++) {
		gdImageLine (im, p[i - 1].x, p[i - 1].y, p[i].x, p[i].y, c);
	}
}
/* THANKS to Kirsten Schulz for the polygon fixes! */
/* The intersection finding technique of this code could be improved */
/* by remembering the previous intertersection, and by using the slope. */
/* That could help to adjust intersections to produce a nice */
/* interior_extrema. */
/**
* Function: gdImageFilledPolygon
*
* Draws a filled polygon
*
* The polygon is filled using the even-odd fillrule what can leave unfilled
* regions inside of self-intersecting polygons. This behavior might change in
* a future version.
*
* Parameters:
* im - The image.
* p - The vertices as array of <gdPoint>s.
* n - The number of vertices.
* c - The color
*
* See also:
* - <gdImagePolygon>
*/
BGD_DECLARE(void) gdImageFilledPolygon (gdImagePtr im, gdPointPtr p, int n, int c)
{
	/* Scanline even-odd fill: for each y, collect the x intersections of
	   the polygon edges, sort them, and draw horizontal spans between
	   alternating pairs. */
	int i;
	int j;
	int index;
	int y;
	int miny, maxy, pmaxy;	/* pmaxy: unclipped maxy for the vertex special case */
	int x1, y1;
	int x2, y2;
	int ind1, ind2;
	int ints;	/* number of intersections collected for this scanline */
	int fill_color;
	if (n <= 0) {
		return;
	}
	if (c == gdAntiAliased) {
		fill_color = im->AA_color;
	} else {
		fill_color = c;
	}
	/* Grow the per-image intersection scratch buffer as needed. */
	if (!im->polyAllocated) {
		if (overflow2(sizeof (int), n)) {
			return;
		}
		im->polyInts = (int *) gdMalloc (sizeof (int) * n);
		if (!im->polyInts) {
			return;
		}
		im->polyAllocated = n;
	}
	if (im->polyAllocated < n) {
		while (im->polyAllocated < n) {
			im->polyAllocated *= 2;
		}
		if (overflow2(sizeof (int), im->polyAllocated)) {
			return;
		}
		im->polyInts = (int *) gdReallocEx (im->polyInts,
		                                    sizeof (int) * im->polyAllocated);
		if (!im->polyInts) {
			return;
		}
	}
	miny = p[0].y;
	maxy = p[0].y;
	for (i = 1; (i < n); i++) {
		if (p[i].y < miny) {
			miny = p[i].y;
		}
		if (p[i].y > maxy) {
			maxy = p[i].y;
		}
	}
	/* necessary special case: horizontal line */
	if (n > 1 && miny == maxy) {
		x1 = x2 = p[0].x;
		for (i = 1; (i < n); i++) {
			if (p[i].x < x1) {
				x1 = p[i].x;
			} else if (p[i].x > x2) {
				x2 = p[i].x;
			}
		}
		gdImageLine(im, x1, miny, x2, miny, c);
		return;
	}
	pmaxy = maxy;
	/* 2.0.16: Optimization by Ilia Chipitsine -- don't waste time offscreen */
	/* 2.0.26: clipping rectangle is even better */
	if (miny < im->cy1) {
		miny = im->cy1;
	}
	if (maxy > im->cy2) {
		maxy = im->cy2;
	}
	/* Fix in 1.3: count a vertex only once */
	for (y = miny; (y <= maxy); y++) {
		ints = 0;
		for (i = 0; (i < n); i++) {
			/* Edge from vertex ind1 to ind2 (wrapping at i == 0). */
			if (!i) {
				ind1 = n - 1;
				ind2 = 0;
			} else {
				ind1 = i - 1;
				ind2 = i;
			}
			y1 = p[ind1].y;
			y2 = p[ind2].y;
			/* Normalize so y1 < y2; skip horizontal edges entirely. */
			if (y1 < y2) {
				x1 = p[ind1].x;
				x2 = p[ind2].x;
			} else if (y1 > y2) {
				y2 = p[ind1].y;
				y1 = p[ind2].y;
				x2 = p[ind1].x;
				x1 = p[ind2].x;
			} else {
				continue;
			}
			/* Do the following math as float intermediately, and round to ensure
			 * that Polygon and FilledPolygon for the same set of points have the
			 * same footprint. */
			/* Half-open [y1, y2) rule so a shared vertex is counted once. */
			if ((y >= y1) && (y < y2)) {
				im->polyInts[ints++] = (int) ((float) ((y - y1) * (x2 - x1)) /
				                              (float) (y2 - y1) + 0.5 + x1);
			} else if ((y == pmaxy) && (y == y2)) {
				im->polyInts[ints++] = x2;
			}
		}
		/*
		  2.0.26: polygons pretty much always have less than 100 points,
		  and most of the time they have considerably less. For such trivial
		  cases, insertion sort is a good choice. Also a good choice for
		  future implementations that may wish to indirect through a table.
		*/
		for (i = 1; (i < ints); i++) {
			index = im->polyInts[i];
			j = i;
			while ((j > 0) && (im->polyInts[j - 1] > index)) {
				im->polyInts[j] = im->polyInts[j - 1];
				j--;
			}
			im->polyInts[j] = index;
		}
		/* Fill between alternating intersection pairs (even-odd rule). */
		for (i = 0; (i < (ints-1)); i += 2) {
			/* 2.0.29: back to gdImageLine to prevent segfaults when
			   performing a pattern fill */
			gdImageLine (im, im->polyInts[i], y, im->polyInts[i + 1], y,
			             fill_color);
		}
	}
	/* If we are drawing this AA, then redraw the border with AA lines. */
	/* This doesn't work as well as I'd like, but it doesn't clash either. */
	if (c == gdAntiAliased) {
		gdImagePolygon (im, p, n, c);
	}
}
/**
* Group: other
*/
static void gdImageSetAAPixelColor(gdImagePtr im, int x, int y, int color, int t);
/**
* Function: gdImageSetStyle
*
* Sets the style for following drawing operations
*
* Parameters:
* im - The image.
* style - An array of color values.
* noOfPixel - The number of color values.
*/
/*
 * Install a new line style (array of noOfPixels color values) on the image.
 * The previous style buffer, if any, is released.
 *
 * Fix: the original freed im->style and then returned early on the
 * overflow2() or gdMalloc() failure paths, leaving im->style dangling and
 * im->styleLength stale -- a later styled draw would read freed memory.
 * We now clear the pointer and length on every failure path.
 */
BGD_DECLARE(void) gdImageSetStyle (gdImagePtr im, int *style, int noOfPixels)
{
	if (im->style) {
		gdFree (im->style);
		/* Avoid a dangling pointer if one of the checks below bails out. */
		im->style = NULL;
	}
	if (overflow2(sizeof (int), noOfPixels)) {
		im->styleLength = 0;
		return;
	}
	im->style = (int *) gdMalloc (sizeof (int) * noOfPixels);
	if (!im->style) {
		im->styleLength = 0;
		return;
	}
	memcpy (im->style, style, sizeof (int) * noOfPixels);
	im->styleLength = noOfPixels;
	im->stylePos = 0;
}
/**
* Function: gdImageSetThickness
*
* Sets the thickness for following drawing operations
*
* Parameters:
* im - The image.
* thickness - The thickness in pixels.
*/
BGD_DECLARE(void) gdImageSetThickness (gdImagePtr im, int thickness)
{
	/* Store the pen width (in pixels) used by subsequent line drawing. */
	im->thick = thickness;
}
/**
* Function: gdImageSetBrush
*
* Sets the brush for following drawing operations
*
* Parameters:
* im - The image.
* brush - The brush image.
*/
BGD_DECLARE(void) gdImageSetBrush (gdImagePtr im, gdImagePtr brush)
{
	/* Attach a brush image for gdBrushed drawing.  When both images are
	   palette-based, precompute a brush-index -> image-index translation
	   table so drawing doesn't resolve colors per pixel. */
	int idx;
	im->brush = brush;
	if (!im->trueColor && !im->brush->trueColor) {
		for (idx = 0; idx < gdImageColorsTotal (brush); idx++) {
			im->brushColorMap[idx] =
			    gdImageColorResolveAlpha (im,
			                              gdImageRed (brush, idx),
			                              gdImageGreen (brush, idx),
			                              gdImageBlue (brush, idx),
			                              gdImageAlpha (brush, idx));
		}
	}
}
/*
Function: gdImageSetTile
*/
BGD_DECLARE(void) gdImageSetTile (gdImagePtr im, gdImagePtr tile)
{
	/* Attach a tile image for gdTiled fills.  For palette-on-palette,
	   precompute the tile-index -> image-index translation table. */
	int idx;
	im->tile = tile;
	if (!im->trueColor && !im->tile->trueColor) {
		for (idx = 0; idx < gdImageColorsTotal (tile); idx++) {
			im->tileColorMap[idx] =
			    gdImageColorResolveAlpha (im,
			                              gdImageRed (tile, idx),
			                              gdImageGreen (tile, idx),
			                              gdImageBlue (tile, idx),
			                              gdImageAlpha (tile, idx));
		}
	}
}
/**
* Function: gdImageSetAntiAliased
*
* Set the color for subsequent anti-aliased drawing
*
* If <gdAntiAliased> is passed as color to drawing operations that support
* anti-aliased drawing (such as <gdImageLine> and <gdImagePolygon>), the actual
* color to be used can be set with this function.
*
* Example: draw an anti-aliased blue line:
* | gdImageSetAntiAliased(im, gdTrueColorAlpha(0, 0, gdBlueMax, gdAlphaOpaque));
* | gdImageLine(im, 10,10, 20,20, gdAntiAliased);
*
* Parameters:
* im - The image.
* c - The color.
*
* See also:
* - <gdImageSetAntiAliasedDontBlend>
*/
BGD_DECLARE(void) gdImageSetAntiAliased (gdImagePtr im, int c)
{
	/* Enable AA drawing with color c; -1 disables the dont_blend color. */
	im->AA = 1;
	im->AA_color = c;
	im->AA_dont_blend = -1;
}
/**
* Function: gdImageSetAntiAliasedDontBlend
*
* Set the color and "dont_blend" color for subsequent anti-aliased drawing
*
* This extended variant of <gdImageSetAntiAliased> allows to also specify a
* (background) color that will not be blended in anti-aliased drawing
* operations.
*
* Parameters:
* im - The image.
* c - The color.
* dont_blend - Whether to blend.
*/
BGD_DECLARE(void) gdImageSetAntiAliasedDontBlend (gdImagePtr im, int c, int dont_blend)
{
	/* Like gdImageSetAntiAliased, but also records a background color that
	   AA drawing will refuse to blend over. */
	im->AA = 1;
	im->AA_color = c;
	im->AA_dont_blend = dont_blend;
}
/**
* Function: gdImageInterlace
*
* Sets whether an image is interlaced
*
* This is relevant only when saving the image in a format that supports
* interlacing.
*
* Parameters:
* im - The image.
* interlaceArg - Whether the image is interlaced.
*
* See also:
* - <gdImageGetInterlaced>
*/
BGD_DECLARE(void) gdImageInterlace (gdImagePtr im, int interlaceArg)
{
	/* Record the interlace flag; honored by writers that support it. */
	im->interlace = interlaceArg;
}
/**
* Function: gdImageCompare
*
* Compare two images
*
* Parameters:
* im1 - An image.
* im2 - Another image.
*
* Returns:
* A bitmask of <Image Comparison> flags where each set flag signals
* which attributes of the images are different.
*/
BGD_DECLARE(int) gdImageCompare (gdImagePtr im1, gdImagePtr im2)
{
	/* Build a GD_CMP_* bitmask of every way the two images differ.
	   Pixel comparison covers only the overlapping area and stops at the
	   first differing color. */
	int x, y;
	int p1, p2;
	int cmpStatus = 0;
	int sx, sy;	/* dimensions of the overlapping area */
	if (im1->interlace != im2->interlace) {
		cmpStatus |= GD_CMP_INTERLACE;
	}
	if (im1->transparent != im2->transparent) {
		cmpStatus |= GD_CMP_TRANSPARENT;
	}
	if (im1->trueColor != im2->trueColor) {
		cmpStatus |= GD_CMP_TRUECOLOR;
	}
	sx = im1->sx;
	if (im1->sx != im2->sx) {
		cmpStatus |= GD_CMP_SIZE_X + GD_CMP_IMAGE;
		if (im2->sx < im1->sx) {
			sx = im2->sx;
		}
	}
	sy = im1->sy;
	if (im1->sy != im2->sy) {
		cmpStatus |= GD_CMP_SIZE_Y + GD_CMP_IMAGE;
		if (im2->sy < im1->sy) {
			sy = im2->sy;
		}
	}
	if (im1->colorsTotal != im2->colorsTotal) {
		cmpStatus |= GD_CMP_NUM_COLORS;
	}
	for (y = 0; (y < sy); y++) {
		for (x = 0; (x < sx); x++) {
			p1 =
			    im1->trueColor ? gdImageTrueColorPixel (im1, x,
			            y) :
			    gdImagePalettePixel (im1, x, y);
			p2 =
			    im2->trueColor ? gdImageTrueColorPixel (im2, x,
			            y) :
			    gdImagePalettePixel (im2, x, y);
			/* Compare resolved channel values so a palette and a
			   truecolor image can still match. */
			if (gdImageRed (im1, p1) != gdImageRed (im2, p2)) {
				cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
				break;
			}
			if (gdImageGreen (im1, p1) != gdImageGreen (im2, p2)) {
				cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
				break;
			}
			if (gdImageBlue (im1, p1) != gdImageBlue (im2, p2)) {
				cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
				break;
			}
#if 0
			/* Soon we'll add alpha channel to palettes */
			if (gdImageAlpha (im1, p1) != gdImageAlpha (im2, p2)) {
				cmpStatus |= GD_CMP_COLOR + GD_CMP_IMAGE;
				break;
			}
#endif
		}
		/* The inner break only left the x loop; bail out fully once a
		   color mismatch is recorded. */
		if (cmpStatus & GD_CMP_COLOR) {
			break;
		};
	}
	return cmpStatus;
}
/* Thanks to Frank Warmerdam for this superior implementation
of gdAlphaBlend(), which merges alpha in the
destination color much better. */
/**
* Function: gdAlphaBlend
*
* Blend two colors
*
* Parameters:
* dst - The color to blend onto.
* src - The color to blend.
*
* See also:
* - <gdImageAlphaBlending>
* - <gdLayerOverlay>
* - <gdLayerMultiply>
*/
BGD_DECLARE(int) gdAlphaBlend (int dst, int src)
{
	/* Blend src over dst, weighting each by its opacity.  Note gd alpha is
	   inverted: gdAlphaOpaque == 0, gdAlphaTransparent == gdAlphaMax. */
	int srcA = gdTrueColorGetAlpha(src);
	int dstA, outA, outR, outG, outB;
	int wSrc, wDst, wTot;

	/* Fast paths: fully opaque source wins outright; a fully transparent
	   side contributes nothing. */
	if( srcA == gdAlphaOpaque )
		return src;
	dstA = gdTrueColorGetAlpha(dst);
	if( srcA == gdAlphaTransparent )
		return dst;
	if( dstA == gdAlphaTransparent )
		return src;

	/* Source weight is its opacity; destination weight is its opacity
	   scaled down as the overlay becomes more opaque. */
	wSrc = gdAlphaTransparent - srcA;
	wDst = (gdAlphaTransparent - dstA) * srcA / gdAlphaMax;
	wTot = wSrc + wDst;

	/* Combined transparency, then the weighted channel averages. */
	outA = srcA * dstA / gdAlphaMax;
	outR = (gdTrueColorGetRed(src) * wSrc
	        + gdTrueColorGetRed(dst) * wDst) / wTot;
	outG = (gdTrueColorGetGreen(src) * wSrc
	        + gdTrueColorGetGreen(dst) * wDst) / wTot;
	outB = (gdTrueColorGetBlue(src) * wSrc
	        + gdTrueColorGetBlue(dst) * wDst) / wTot;

	/* Repack the merged ARGB value. */
	return ((outA << 24) + (outR << 16) + (outG << 8) + outB);
}
static int gdAlphaOverlayColor (int src, int dst, int max );
/**
* Function: gdLayerOverlay
*
* Overlay two colors
*
* Parameters:
* dst - The color to overlay onto.
* src - The color to overlay.
*
* See also:
* - <gdImageAlphaBlending>
* - <gdAlphaBlend>
* - <gdLayerMultiply>
*/
BGD_DECLARE(int) gdLayerOverlay (int dst, int src)
{
	/* Photoshop-style "overlay": combine coverages for the alpha channel
	   and apply the per-channel overlay curve to R, G and B. */
	int covDst, covSrc;
	covDst = gdAlphaMax - gdTrueColorGetAlpha(dst);	/* opacity of dst */
	covSrc = gdAlphaMax - gdTrueColorGetAlpha(src);	/* opacity of src */
	return ( ((gdAlphaMax - covDst*covSrc/gdAlphaMax) << 24) +
	         (gdAlphaOverlayColor( gdTrueColorGetRed(src),   gdTrueColorGetRed(dst),   gdRedMax )   << 16) +
	         (gdAlphaOverlayColor( gdTrueColorGetGreen(src), gdTrueColorGetGreen(dst), gdGreenMax ) << 8) +
	         (gdAlphaOverlayColor( gdTrueColorGetBlue(src),  gdTrueColorGetBlue(dst),  gdBlueMax ))
	       );
}
/* Apply 'overlay' effect - background pixels are colourised by the foreground colour */
/* Per-channel 'overlay' curve: dark backgrounds multiply, light ones screen.
   src/dst are single channel values, max is the channel's full-scale value. */
static int gdAlphaOverlayColor (int src, int dst, int max )
{
	int d2 = dst << 1;
	if( d2 <= max ) {
		/* "dark" half of the curve: plain multiply */
		return d2 * src / max;
	}
	/* "light" half: screen-like combination */
	return d2 + (src << 1) - (d2 * src / max) - max;
}
/**
* Function: gdLayerMultiply
*
* Overlay two colors with multiply effect
*
* Parameters:
* dst - The color to overlay onto.
* src - The color to overlay.
*
* See also:
* - <gdImageAlphaBlending>
* - <gdAlphaBlend>
* - <gdLayerOverlay>
*/
BGD_DECLARE(int) gdLayerMultiply (int dst, int src)
{
	/* "Multiply" blend: composite each channel against white in proportion
	   to its pixel's opacity, multiply the results, and multiply the two
	   opacities for the output alpha. */
	int sCov, dCov, sR, dR, sG, dG, sB, dB;
	sCov = gdAlphaMax - gdTrueColorGetAlpha(src);	/* src opacity */
	dCov = gdAlphaMax - gdTrueColorGetAlpha(dst);	/* dst opacity */
	/* Channels faded toward full-scale by (1 - opacity). */
	sR = gdRedMax - (sCov * (gdRedMax - gdTrueColorGetRed(src))) / gdAlphaMax;
	dR = gdRedMax - (dCov * (gdRedMax - gdTrueColorGetRed(dst))) / gdAlphaMax;
	sG = gdGreenMax - (sCov * (gdGreenMax - gdTrueColorGetGreen(src))) / gdAlphaMax;
	dG = gdGreenMax - (dCov * (gdGreenMax - gdTrueColorGetGreen(dst))) / gdAlphaMax;
	sB = gdBlueMax - (sCov * (gdBlueMax - gdTrueColorGetBlue(src))) / gdAlphaMax;
	dB = gdBlueMax - (dCov * (gdBlueMax - gdTrueColorGetBlue(dst))) / gdAlphaMax ;
	/* Back to gd's inverted-alpha convention for the output. */
	sCov = gdAlphaMax - sCov;
	dCov = gdAlphaMax - dCov;
	return ( ((sCov*dCov/gdAlphaMax) << 24) +
	         ((sR*dR/gdRedMax) << 16) +
	         ((sG*dG/gdGreenMax) << 8) +
	         ((sB*dB/gdBlueMax))
	       );
}
/**
* Function: gdImageAlphaBlending
*
* Set the effect for subsequent drawing operations
*
* Note that the effect is used for truecolor images only.
*
* Parameters:
* im - The image.
* alphaBlendingArg - The effect.
*
* See also:
* - <Effects>
*/
BGD_DECLARE(void) gdImageAlphaBlending (gdImagePtr im, int alphaBlendingArg)
{
	/* Select the pixel-combining effect for subsequent truecolor drawing. */
	im->alphaBlendingFlag = alphaBlendingArg;
}
/**
* Function: gdImageSaveAlpha
*
* Sets the save alpha flag
*
* The save alpha flag specifies whether the alpha channel of the pixels should
* be saved. This is supported only for image formats that support full alpha
* transparency, e.g. PNG.
*/
BGD_DECLARE(void) gdImageSaveAlpha (gdImagePtr im, int saveAlphaArg)
{
	/* Whether writers that support it (e.g. PNG) keep the alpha channel. */
	im->saveAlphaFlag = saveAlphaArg;
}
/**
* Function: gdImageSetClip
*
* Sets the clipping rectangle
*
* The clipping rectangle restricts the drawing area for following drawing
* operations.
*
* Parameters:
* im - The image.
* x1 - The x-coordinate of the upper left corner.
* y1 - The y-coordinate of the upper left corner.
* x2 - The x-coordinate of the lower right corner.
* y2 - The y-coordinate of the lower right corner.
*
* See also:
* - <gdImageGetClip>
*/
BGD_DECLARE(void) gdImageSetClip (gdImagePtr im, int x1, int y1, int x2, int y2)
{
	/* Clamp each corner coordinate into the image bounds, then record the
	   clipping rectangle used by subsequent drawing. */
	const int xMax = im->sx - 1;
	const int yMax = im->sy - 1;

	if (x1 < 0)
		x1 = 0;
	if (x1 > xMax)
		x1 = xMax;
	if (x2 < 0)
		x2 = 0;
	if (x2 > xMax)
		x2 = xMax;
	if (y1 < 0)
		y1 = 0;
	if (y1 > yMax)
		y1 = yMax;
	if (y2 < 0)
		y2 = 0;
	if (y2 > yMax)
		y2 = yMax;

	im->cx1 = x1;
	im->cy1 = y1;
	im->cx2 = x2;
	im->cy2 = y2;
}
/**
* Function: gdImageGetClip
*
* Gets the current clipping rectangle
*
* Parameters:
* im - The image.
* x1P - (out) The x-coordinate of the upper left corner.
* y1P - (out) The y-coordinate of the upper left corner.
* x2P - (out) The x-coordinate of the lower right corner.
* y2P - (out) The y-coordinate of the lower right corner.
*
* See also:
* - <gdImageSetClip>
*/
BGD_DECLARE(void) gdImageGetClip (gdImagePtr im, int *x1P, int *y1P, int *x2P, int *y2P)
{
	/* Copy the current clip rectangle out through the four pointers.
	   NOTE(review): out-pointers are dereferenced unconditionally —
	   callers must not pass NULL. */
	*x1P = im->cx1;
	*y1P = im->cy1;
	*x2P = im->cx2;
	*y2P = im->cy2;
}
/**
* Function: gdImageSetResolution
*
* Sets the resolution of an image.
*
* Parameters:
* im - The image.
* res_x - The horizontal resolution in DPI.
* res_y - The vertical resolution in DPI.
*
* See also:
* - <gdImageResolutionX>
* - <gdImageResolutionY>
*/
BGD_DECLARE(void) gdImageSetResolution(gdImagePtr im, const unsigned int res_x, const unsigned int res_y)
{
	/* Update DPI; zero values are ignored so an axis can be left as-is. */
	if (res_x > 0) im->res_x = res_x;
	if (res_y > 0) im->res_y = res_y;
}
/*
* Added on 2003/12 by Pierre-Alain Joye (pajoye@pearfr.org)
* */
#define BLEND_COLOR(a, nc, c, cc) \
nc = (cc) + (((((c) - (cc)) * (a)) + ((((c) - (cc)) * (a)) >> 8) + 0x80) >> 8);
static void gdImageSetAAPixelColor(gdImagePtr im, int x, int y, int color, int t)
{
	/* Blend `color` onto pixel (x,y); t (0..255) is the weight of the
	   EXISTING pixel, so t == 0 writes pure `color`.
	   NOTE(review): writes im->tpixels directly, so the image must be
	   truecolor — gdImageAALine checks this before calling here. */
	int dr,dg,db,p,r,g,b;
	/* 2.0.34: watch out for out of range calls */
	if (!gdImageBoundsSafeMacro(im, x, y)) {
		return;
	}
	p = gdImageGetPixel(im,x,y);
	/* TBB: we have to implement the dont_blend stuff to provide
	   the full feature set of the old implementation */
	if ((p == color)
	        || ((p == im->AA_dont_blend)
	            && (t != 0x00))) {
		return;
	}
	dr = gdTrueColorGetRed(color);
	dg = gdTrueColorGetGreen(color);
	db = gdTrueColorGetBlue(color);

	r = gdTrueColorGetRed(p);
	g = gdTrueColorGetGreen(p);
	b = gdTrueColorGetBlue(p);

	/* BLEND_COLOR(a, nc, c, cc) computes nc = cc + (c - cc) * a / 255
	   (with rounding): move each requested channel toward the existing
	   background channel by t/255. */
	BLEND_COLOR(t, dr, r, dr);
	BLEND_COLOR(t, dg, g, dg);
	BLEND_COLOR(t, db, b, db);
	im->tpixels[y][x] = gdTrueColorAlpha(dr, dg, db, gdAlphaOpaque);
}
static void gdImageAALine (gdImagePtr im, int x1, int y1, int x2, int y2, int col)
{
	/* Anti-aliased line via a 16.16 fixed-point DDA: walk the major axis
	   one pixel at a time and split coverage between the two pixels
	   straddling the true line position on the minor axis. */
	/* keep them as 32bits */
	long x, y, inc, frac;
	long dx, dy,tmp;
	int w, wid, wstart;
	int thick = im->thick;

	if (!im->trueColor) {
		/* TBB: don't crash when the image is of the wrong type */
		gdImageLine(im, x1, y1, x2, y2, col);
		return;
	}

	/* TBB: use the clipping rectangle */
	if (clip_1d (&x1, &y1, &x2, &y2, im->cx1, im->cx2) == 0)
		return;
	if (clip_1d (&y1, &x1, &y2, &x2, im->cy1, im->cy2) == 0)
		return;

	dx = x2 - x1;
	dy = y2 - y1;

	if (dx == 0 && dy == 0) {
		/* TBB: allow setting points */
		gdImageSetPixel(im, x1, y1, col);
		return;
	} else {
		double ag;
		/* Widen the stroke so a diagonal line keeps the requested
		   perpendicular thickness. */
		/* Cast the long to an int to avoid compiler warnings about truncation.
		 * This isn't a problem as computed dy/dx values came from ints above. */
		ag = fabs(abs((int)dy) < abs((int)dx) ? cos(atan2(dy, dx)) : sin(atan2(dy, dx)));
		if (ag != 0) {
			wid = thick / ag;
		} else {
			wid = 1;
		}
		if (wid == 0) {
			wid = 1;
		}
	}

	/* Axis aligned lines */
	if (dx == 0) {
		gdImageVLine(im, x1, y1, y2, col);
		return;
	} else if (dy == 0) {
		gdImageHLine(im, y1, x1, x2, col);
		return;
	}

	if (abs((int)dx) > abs((int)dy)) {
		/* x-major: iterate over x, track y in 16.16 fixed point. */
		if (dx < 0) {
			/* Swap endpoints so we always walk left-to-right. */
			tmp = x1;
			x1 = x2;
			x2 = tmp;
			tmp = y1;
			y1 = y2;
			y2 = tmp;
			dx = x2 - x1;
			dy = y2 - y1;
		}
		y = y1;
		inc = (dy * 65536) / dx;	/* minor-axis step per pixel, 16.16 */
		frac = 0;
		/* TBB: set the last pixel for consistency (<=) */
		for (x = x1 ; x <= x2 ; x++) {
			wstart = y - wid / 2;
			for (w = wstart; w < wstart + wid; w++) {
				/* Split coverage between the two adjacent rows. */
				gdImageSetAAPixelColor(im, x , w , col , (frac >> 8) & 0xFF);
				gdImageSetAAPixelColor(im, x , w + 1 , col, (~frac >> 8) & 0xFF);
			}
			frac += inc;
			if (frac >= 65536) {
				frac -= 65536;
				y++;
			} else if (frac < 0) {
				frac += 65536;
				y--;
			}
		}
	} else {
		/* y-major: same scheme with the axes exchanged. */
		if (dy < 0) {
			tmp = x1;
			x1 = x2;
			x2 = tmp;
			tmp = y1;
			y1 = y2;
			y2 = tmp;
			dx = x2 - x1;
			dy = y2 - y1;
		}
		x = x1;
		inc = (dx * 65536) / dy;
		frac = 0;
		/* TBB: set the last pixel for consistency (<=) */
		for (y = y1 ; y <= y2 ; y++) {
			wstart = x - wid / 2;
			for (w = wstart; w < wstart + wid; w++) {
				gdImageSetAAPixelColor(im, w , y , col, (frac >> 8) & 0xFF);
				gdImageSetAAPixelColor(im, w + 1, y, col, (~frac >> 8) & 0xFF);
			}
			frac += inc;
			if (frac >= 65536) {
				frac -= 65536;
				x++;
			} else if (frac < 0) {
				frac += 65536;
				x--;
			}
		}
	}
}
/**
* Function: gdImagePaletteToTrueColor
*
* Convert a palette image to true color
*
* Parameters:
* src - The image.
*
* Returns:
* Non-zero if the conversion succeeded, zero otherwise.
*
* See also:
* - <gdImageTrueColorToPalette>
*/
/*
 * Convert a palette image to truecolor in place.
 * Returns 1 on success (or if already truecolor), 0 on failure; on failure
 * the image is left unchanged as a palette image.
 *
 * Fix: the error path freed src->tpixels but left the member pointing at
 * freed memory on an image still flagged as palette-based, risking a later
 * double free / use-after-free; it is now reset to NULL.
 */
BGD_DECLARE(int) gdImagePaletteToTrueColor(gdImagePtr src)
{
	unsigned int y;
	unsigned int yy;

	if (src == NULL) {
		return 0;
	}
	if (src->trueColor == 1) {
		return 1;	/* nothing to do */
	} else {
		unsigned int x;
		const unsigned int sy = gdImageSY(src);
		const unsigned int sx = gdImageSX(src);

		src->tpixels = (int **) gdMalloc(sizeof(int *) * sy);
		if (src->tpixels == NULL) {
			return 0;
		}
		for (y = 0; y < sy; y++) {
			const unsigned char *src_row = src->pixels[y];
			int * dst_row;

			/* no need to calloc it, we overwrite all pxl anyway */
			src->tpixels[y] = (int *) gdMalloc(sx * sizeof(int));
			if (src->tpixels[y] == NULL) {
				goto clean_on_error;
			}

			dst_row = src->tpixels[y];
			for (x = 0; x < sx; x++) {
				const unsigned char c = *(src_row + x);
				if (c == src->transparent) {
					/* Map the transparent index to fully transparent black. */
					*(dst_row + x) = gdTrueColorAlpha(0, 0, 0, 127);
				} else {
					*(dst_row + x) = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]);
				}
			}
		}
	}

	/* free old palette buffer (y is sy) */
	for (yy = 0; yy < y; yy++) {
		gdFree(src->pixels[yy]);
	}
	gdFree(src->pixels);
	src->trueColor = 1;
	src->pixels = NULL;
	src->alphaBlendingFlag = 0;
	src->saveAlphaFlag = 1;

	/* Re-express the transparent palette index as a truecolor value. */
	if (src->transparent >= 0) {
		const unsigned char c = src->transparent;
		src->transparent = gdTrueColorAlpha(src->red[c], src->green[c], src->blue[c], src->alpha[c]);
	}

	return 1;

clean_on_error:
	/* free new true color buffer (y rows were allocated before the failure) */
	for (yy = 0; yy < y; yy++) {
		gdFree(src->tpixels[yy]);
	}
	gdFree(src->tpixels);
	src->tpixels = NULL;	/* don't leave a dangling pointer on a palette image */
	return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_272_0 |
crossvul-cpp_data_bad_528_2 | #define DEFINE_INLINES
#include "vterm_internal.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include "utf8.h"
/*****************
* API functions *
*****************/
/* Default allocator: returns zero-initialised memory, or NULL on failure.
 * The allocdata cookie is unused by the built-in allocator. */
static void *default_malloc(size_t size, void *allocdata UNUSED)
{
  /* calloc gives the same zeroed-block contract as malloc+memset */
  return calloc(1, size);
}
/* Default allocator: release memory obtained from default_malloc(). */
static void default_free(void *ptr, void *allocdata UNUSED)
{
  free(ptr);
}
/* Allocator used by vterm_new() when the caller supplies none. */
static VTermAllocatorFunctions default_allocator = {
  &default_malloc, // malloc
  &default_free // free
};
/* Create a new terminal of the given size using the default allocator. */
VTerm *vterm_new(int rows, int cols)
{
  return vterm_new_with_allocator(rows, cols, &default_allocator, NULL);
}
/*
 * Create a new terminal of the given size using a caller-supplied allocator.
 * Returns NULL if any allocation fails (the caller's malloc callback is
 * allowed to fail, so every result must be checked before use).
 */
VTerm *vterm_new_with_allocator(int rows, int cols, VTermAllocatorFunctions *funcs, void *allocdata)
{
  /* Need to bootstrap using the allocator function directly */
  VTerm *vt = (*funcs->malloc)(sizeof(VTerm), allocdata);

  /* Bug fix: the allocator may return NULL; dereferencing it would crash. */
  if (vt == NULL)
    return NULL;

  vt->allocator = funcs;
  vt->allocdata = allocdata;

  vt->rows = rows;
  vt->cols = cols;

  vt->parser.state = NORMAL;

  vt->parser.callbacks = NULL;
  vt->parser.cbdata    = NULL;

  vt->parser.strbuffer_len = 500; /* should be able to hold an OSC string */
  vt->parser.strbuffer_cur = 0;
  vt->parser.strbuffer = vterm_allocator_malloc(vt, vt->parser.strbuffer_len);

  vt->outbuffer_len = 200;
  vt->outbuffer_cur = 0;
  vt->outbuffer = vterm_allocator_malloc(vt, vt->outbuffer_len);

  /* Bug fix: fail cleanly if either internal buffer could not be allocated;
   * a custom free callback may not accept NULL, so guard each pointer. */
  if (vt->parser.strbuffer == NULL || vt->outbuffer == NULL) {
    if (vt->parser.strbuffer != NULL)
      vterm_allocator_free(vt, vt->parser.strbuffer);
    if (vt->outbuffer != NULL)
      vterm_allocator_free(vt, vt->outbuffer);
    vterm_allocator_free(vt, vt);
    return NULL;
  }

  return vt;
}
/* Destroy a terminal, tearing down the optional screen and state layers
 * before releasing the internal buffers and the VTerm itself. */
void vterm_free(VTerm *vt)
{
  if(vt->screen)
    vterm_screen_free(vt->screen);

  if(vt->state)
    vterm_state_free(vt->state);

  vterm_allocator_free(vt, vt->parser.strbuffer);
  vterm_allocator_free(vt, vt->outbuffer);

  vterm_allocator_free(vt, vt);
}
/* Allocate memory through the terminal's configured allocator. */
INTERNAL void *vterm_allocator_malloc(VTerm *vt, size_t size)
{
  return (*vt->allocator->malloc)(size, vt->allocdata);
}

/* Release memory through the terminal's configured allocator. */
INTERNAL void vterm_allocator_free(VTerm *vt, void *ptr)
{
  (*vt->allocator->free)(ptr, vt->allocdata);
}
/* Report the terminal size; either out-pointer may be NULL. */
void vterm_get_size(const VTerm *vt, int *rowsp, int *colsp)
{
  if(rowsp)
    *rowsp = vt->rows;
  if(colsp)
    *colsp = vt->cols;
}

/* Resize the terminal and notify the parser's resize callback, if any. */
void vterm_set_size(VTerm *vt, int rows, int cols)
{
  vt->rows = rows;
  vt->cols = cols;

  if(vt->parser.callbacks && vt->parser.callbacks->resize)
    (*vt->parser.callbacks->resize)(rows, cols, vt->parser.cbdata);
}
/* Query whether input bytes are interpreted as UTF-8. */
int vterm_get_utf8(const VTerm *vt)
{
  return vt->mode.utf8;
}

/* Select UTF-8 (non-zero) or latin-1 (zero) input interpretation. */
void vterm_set_utf8(VTerm *vt, int is_utf8)
{
  vt->mode.utf8 = is_utf8;
}
/* Append raw bytes to the output buffer, truncating (with a debug log)
 * anything that does not fit in the remaining space. */
INTERNAL void vterm_push_output_bytes(VTerm *vt, const char *bytes, size_t len)
{
  if(len > vt->outbuffer_len - vt->outbuffer_cur) {
    DEBUG_LOG("vterm_push_output(): buffer overflow; truncating output\n");
    len = vt->outbuffer_len - vt->outbuffer_cur;
  }

  memcpy(vt->outbuffer + vt->outbuffer_cur, bytes, len);
  vt->outbuffer_cur += len;
}
/* True when at most one byte of output space remains (the last byte is
 * treated as reserved, so the buffer counts as full at len-1). */
static int outbuffer_is_full(VTerm *vt)
{
  return vt->outbuffer_cur + 1 >= vt->outbuffer_len;
}
#if (defined(_XOPEN_SOURCE) && _XOPEN_SOURCE >= 500) \
|| defined(_ISOC99_SOURCE) || defined(_BSD_SOURCE)
# undef VSNPRINTF
# define VSNPRINTF vsnprintf
#else
# ifdef VSNPRINTF
/* Use a provided vsnprintf() function. */
int VSNPRINTF(char *str, size_t str_m, const char *fmt, va_list ap);
# endif
#endif
/* Append printf-formatted text to the output buffer.  Uses vsnprintf()
 * when available; otherwise formats into a fixed scratch buffer with
 * vsprintf() and copies at most the remaining space.  Output that does
 * not fit is truncated.
 * NOTE(review): in the vsprintf() fallback, formatted output longer than
 * the 1024-byte scratch buffer would overflow it — callers are presumed
 * to keep messages short; confirm before relying on long formats. */
INTERNAL void vterm_push_output_vsprintf(VTerm *vt, const char *format, va_list args)
{
  int written;
#ifndef VSNPRINTF
  /* When vsnprintf() is not available (C90) fall back to vsprintf(). */
  char buffer[1024]; /* 1Kbyte is enough for everybody, right? */
#endif

  if(outbuffer_is_full(vt)) {
    DEBUG_LOG("vterm_push_output(): buffer overflow; truncating output\n");
    return;
  }

#ifdef VSNPRINTF
  /* Format directly into the remaining buffer space. */
  written = VSNPRINTF(vt->outbuffer + vt->outbuffer_cur,
      vt->outbuffer_len - vt->outbuffer_cur,
      format, args);

  if(written == (int)(vt->outbuffer_len - vt->outbuffer_cur)) {
    /* output was truncated */
    vt->outbuffer_cur = vt->outbuffer_len - 1;
  }
  else
    vt->outbuffer_cur += written;
#else
  written = vsprintf(buffer, format, args);

  if(written >= (int)(vt->outbuffer_len - vt->outbuffer_cur - 1)) {
    /* output was truncated */
    written = vt->outbuffer_len - vt->outbuffer_cur - 1;
  }
  if (written > 0)
  {
    /* Copy written bytes plus the terminating NUL. */
    strncpy(vt->outbuffer + vt->outbuffer_cur, buffer, written + 1);
    vt->outbuffer_cur += written;
  }
#endif
}
/* Variadic convenience wrapper around vterm_push_output_vsprintf(). */
INTERNAL void vterm_push_output_sprintf(VTerm *vt, const char *format, ...)
{
  va_list args;
  va_start(args, format);
  vterm_push_output_vsprintf(vt, format, args);
  va_end(args);
}
/* Emit a C1 control byte (or its 7-bit ESC-prefixed form when 8-bit
 * controls are disabled) followed by formatted text.  If the whole
 * sequence did not fit, roll the buffer back so no partial control
 * sequence is emitted. */
INTERNAL void vterm_push_output_sprintf_ctrl(VTerm *vt, unsigned char ctrl, const char *fmt, ...)
{
  size_t orig_cur = vt->outbuffer_cur;
  va_list args;

  if(ctrl >= 0x80 && !vt->mode.ctrl8bit)
    /* 7-bit form: ESC followed by (ctrl - 0x40) */
    vterm_push_output_sprintf(vt, ESC_S "%c", ctrl - 0x40);
  else
    vterm_push_output_sprintf(vt, "%c", ctrl);

  va_start(args, fmt);
  vterm_push_output_vsprintf(vt, fmt, args);
  va_end(args);

  if(outbuffer_is_full(vt))
    vt->outbuffer_cur = orig_cur;
}
/* Emit a DCS (Device Control String): DCS introducer, formatted payload,
 * then ST terminator.  Rolls the buffer back on overflow so a truncated
 * DCS is never emitted. */
INTERNAL void vterm_push_output_sprintf_dcs(VTerm *vt, const char *fmt, ...)
{
  size_t orig_cur = vt->outbuffer_cur;
  va_list args;

  if(!vt->mode.ctrl8bit)
    /* 7-bit form: ESC P */
    vterm_push_output_sprintf(vt, ESC_S "%c", C1_DCS - 0x40);
  else
    vterm_push_output_sprintf(vt, "%c", C1_DCS);

  va_start(args, fmt);
  vterm_push_output_vsprintf(vt, fmt, args);
  va_end(args);

  vterm_push_output_sprintf_ctrl(vt, C1_ST, "");

  if(outbuffer_is_full(vt))
    vt->outbuffer_cur = orig_cur;
}
/* Total capacity of the output buffer in bytes. */
size_t vterm_output_get_buffer_size(const VTerm *vt)
{
  return vt->outbuffer_len;
}

/* Number of bytes currently queued for output. */
size_t vterm_output_get_buffer_current(const VTerm *vt)
{
  return vt->outbuffer_cur;
}

/* Free space remaining in the output buffer. */
size_t vterm_output_get_buffer_remaining(const VTerm *vt)
{
  return vt->outbuffer_len - vt->outbuffer_cur;
}
/* Drain up to len bytes of pending output into the caller's buffer.
 * Any remaining queued bytes are shifted to the front of the internal
 * buffer.  Returns the number of bytes copied. */
size_t vterm_output_read(VTerm *vt, char *buffer, size_t len)
{
  if(len > vt->outbuffer_cur)
    len = vt->outbuffer_cur;

  memcpy(buffer, vt->outbuffer, len);

  if(len < vt->outbuffer_cur)
    /* Compact the unread tail; regions may overlap, so memmove. */
    memmove(vt->outbuffer, vt->outbuffer + len, vt->outbuffer_cur - len);

  vt->outbuffer_cur -= len;

  return len;
}
/* Map a pen attribute to the value type used to set/query it.
 * Returns 0 only for VTERM_N_ATTRS, which is not a real attribute. */
VTermValueType vterm_get_attr_type(VTermAttr attr)
{
  switch(attr) {
    case VTERM_ATTR_BOLD:       return VTERM_VALUETYPE_BOOL;
    case VTERM_ATTR_UNDERLINE:  return VTERM_VALUETYPE_INT;
    case VTERM_ATTR_ITALIC:     return VTERM_VALUETYPE_BOOL;
    case VTERM_ATTR_BLINK:      return VTERM_VALUETYPE_BOOL;
    case VTERM_ATTR_REVERSE:    return VTERM_VALUETYPE_BOOL;
    case VTERM_ATTR_STRIKE:     return VTERM_VALUETYPE_BOOL;
    case VTERM_ATTR_FONT:       return VTERM_VALUETYPE_INT;
    case VTERM_ATTR_FOREGROUND: return VTERM_VALUETYPE_COLOR;
    case VTERM_ATTR_BACKGROUND: return VTERM_VALUETYPE_COLOR;

    case VTERM_N_ATTRS: return 0;
  }
  return 0; /* UNREACHABLE */
}
/* Map a terminal property to the value type used to set/query it.
 * Returns 0 only for VTERM_N_PROPS, which is not a real property. */
VTermValueType vterm_get_prop_type(VTermProp prop)
{
  switch(prop) {
    case VTERM_PROP_CURSORVISIBLE: return VTERM_VALUETYPE_BOOL;
    case VTERM_PROP_CURSORBLINK:   return VTERM_VALUETYPE_BOOL;
    case VTERM_PROP_ALTSCREEN:     return VTERM_VALUETYPE_BOOL;
    case VTERM_PROP_TITLE:         return VTERM_VALUETYPE_STRING;
    case VTERM_PROP_ICONNAME:      return VTERM_VALUETYPE_STRING;
    case VTERM_PROP_REVERSE:       return VTERM_VALUETYPE_BOOL;
    case VTERM_PROP_CURSORSHAPE:   return VTERM_VALUETYPE_INT;
    case VTERM_PROP_MOUSE:         return VTERM_VALUETYPE_INT;
    case VTERM_PROP_CURSORCOLOR:   return VTERM_VALUETYPE_STRING;

    case VTERM_N_PROPS: return 0;
  }
  return 0; /* UNREACHABLE */
}
/* Scroll the contents of `rect` by `downward` rows and `rightward`
 * columns (either may be negative).  The surviving region is moved via
 * the moverect callback and the vacated strip erased via eraserect.
 * If the scroll distance covers the whole rect, everything is erased. */
void vterm_scroll_rect(VTermRect rect,
    int downward,
    int rightward,
    int (*moverect)(VTermRect src, VTermRect dest, void *user),
    int (*eraserect)(VTermRect rect, int selective, void *user),
    void *user)
{
  VTermRect src;
  VTermRect dest;

  if(abs(downward)  >= rect.end_row - rect.start_row ||
     abs(rightward) >= rect.end_col - rect.start_col) {
    /* Scroll more than area; just erase the lot */
    (*eraserect)(rect, 0, user);
    return;
  }

  if(rightward >= 0) {
    /* rect: [XXX................]
     * src:     [----------------]
     * dest: [----------------]
     */
    dest.start_col = rect.start_col;
    dest.end_col   = rect.end_col   - rightward;
    src.start_col  = rect.start_col + rightward;
    src.end_col    = rect.end_col;
  }
  else {
    /* rect: [................XXX]
     * src:  [----------------]
     * dest:    [----------------]
     */
    int leftward = -rightward;
    dest.start_col = rect.start_col + leftward;
    dest.end_col   = rect.end_col;
    src.start_col  = rect.start_col;
    src.end_col    = rect.end_col - leftward;
  }

  /* Same construction vertically: positive `downward` moves content up. */
  if(downward >= 0) {
    dest.start_row = rect.start_row;
    dest.end_row   = rect.end_row   - downward;
    src.start_row  = rect.start_row + downward;
    src.end_row    = rect.end_row;
  }
  else {
    int upward = -downward;
    dest.start_row = rect.start_row + upward;
    dest.end_row   = rect.end_row;
    src.start_row  = rect.start_row;
    src.end_row    = rect.end_row - upward;
  }

  /* NOTE: the callback is invoked as moverect(dest, src) here, i.e. the
   * first argument is the destination despite the prototype's naming. */
  if(moverect)
    (*moverect)(dest, src, user);

  /* Shrink rect down to just the strip that was vacated, then erase it. */
  if(downward > 0)
    rect.start_row = rect.end_row - downward;
  else if(downward < 0)
    rect.end_row = rect.start_row - downward;

  if(rightward > 0)
    rect.start_col = rect.end_col - rightward;
  else if(rightward < 0)
    rect.end_col = rect.start_col - rightward;

  (*eraserect)(rect, 0, user);
}
/* Copy cells from rect `src` to rect `dest` one cell at a time via the
 * copycell callback.  The iteration direction is chosen per axis so that
 * overlapping rectangles are copied without clobbering cells that have
 * not been read yet (analogous to memmove). */
void vterm_copy_cells(VTermRect dest,
    VTermRect src,
    void (*copycell)(VTermPos dest, VTermPos src, void *user),
    void *user)
{
  /* Offsets from a destination cell back to its source cell. */
  int downward  = src.start_row - dest.start_row;
  int rightward = src.start_col - dest.start_col;

  int init_row, test_row, init_col, test_col;
  int inc_row, inc_col;

  VTermPos pos;

  if(downward < 0) {
    /* Content moves down: iterate bottom-to-top so sources survive. */
    init_row = dest.end_row - 1;
    test_row = dest.start_row - 1;
    inc_row = -1;
  }
  else /* downward >= 0 */ {
    init_row = dest.start_row;
    test_row = dest.end_row;
    inc_row = +1;
  }

  if(rightward < 0) {
    /* Content moves right: iterate right-to-left. */
    init_col = dest.end_col - 1;
    test_col = dest.start_col - 1;
    inc_col = -1;
  }
  else /* rightward >= 0 */ {
    init_col = dest.start_col;
    test_col = dest.end_col;
    inc_col = +1;
  }

  for(pos.row = init_row; pos.row != test_row; pos.row += inc_row)
    for(pos.col = init_col; pos.col != test_col; pos.col += inc_col) {
      VTermPos srcpos;
      srcpos.row = pos.row + downward;
      srcpos.col = pos.col + rightward;
      (*copycell)(pos, srcpos, user);
    }
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_528_2 |
crossvul-cpp_data_good_635_0 | /*
* Copyright (c) 2007 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/dma-mapping.h> /* for DMA_*_DEVICE */
#include "rds.h"
/*
* XXX
* - build with sparse
* - should we detect duplicate keys on a socket? hmm.
* - an rdma is an mlock, apply rlimit?
*/
/*
 * get the number of pages by looking at the page indices that the start and
 * end addresses fall in.
 *
 * Returns 0 if the vec is invalid.  It is invalid if the number of bytes
 * causes the address to wrap or overflows an unsigned int.  This comes
 * from being stored in the 'length' member of 'struct scatterlist'.
 */
static unsigned int rds_pages_in_vec(struct rds_iovec *vec)
{
	/* Reject wrap-around (also catches bytes == 0) and lengths that
	 * would not fit in scatterlist's u32 length field. */
	if ((vec->addr + vec->bytes <= vec->addr) ||
	    (vec->bytes > (u64)UINT_MAX))
		return 0;

	/* Count distinct pages touched: last page index (rounded up) minus
	 * first page index. */
	return ((vec->addr + vec->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT) -
		(vec->addr >> PAGE_SHIFT);
}
/* Look up an MR by r_key in the socket's rbtree.  If found, return it.
 * If not found and `insert` is non-NULL, link `insert` into the tree at
 * the search position, take a reference on it, and return NULL.
 * Caller must hold rs_rdma_lock. */
static struct rds_mr *rds_mr_tree_walk(struct rb_root *root, u64 key,
	struct rds_mr *insert)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct rds_mr *mr;

	/* Standard rbtree descent keyed on r_key. */
	while (*p) {
		parent = *p;
		mr = rb_entry(parent, struct rds_mr, r_rb_node);

		if (key < mr->r_key)
			p = &(*p)->rb_left;
		else if (key > mr->r_key)
			p = &(*p)->rb_right;
		else
			return mr;
	}

	if (insert) {
		rb_link_node(&insert->r_rb_node, parent, p);
		rb_insert_color(&insert->r_rb_node, root);
		/* The tree now holds a reference on the inserted MR. */
		refcount_inc(&insert->r_refcount);
	}
	return NULL;
}
/*
 * Destroy the transport-specific part of a MR.
 */
static void rds_destroy_mr(struct rds_mr *mr)
{
	struct rds_sock *rs = mr->r_sock;
	void *trans_private = NULL;
	unsigned long flags;

	rdsdebug("RDS: destroy mr key is %x refcnt %u\n",
		 mr->r_key, refcount_read(&mr->r_refcount));

	/* Only the first caller proceeds; RDS_MR_DEAD makes this idempotent. */
	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
		return;

	/* Detach from the socket's key tree and steal the transport-private
	 * pointer under the lock, then free it outside the lock. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	if (!RB_EMPTY_NODE(&mr->r_rb_node))
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
	trans_private = mr->r_trans_private;
	mr->r_trans_private = NULL;
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (trans_private)
		mr->r_trans->free_mr(trans_private, mr->r_invalidate);
}
/* Final-reference destructor: tear down the MR and free it. */
void __rds_put_mr_final(struct rds_mr *mr)
{
	rds_destroy_mr(mr);
	kfree(mr);
}
/*
 * By the time this is called we can't have any more ioctls called on
 * the socket so we don't need to worry about racing with others.
 */
void rds_rdma_drop_keys(struct rds_sock *rs)
{
	struct rds_mr *mr;
	struct rb_node *node;
	unsigned long flags;

	/* Release any MRs associated with this socket */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	while ((node = rb_first(&rs->rs_rdma_keys))) {
		mr = rb_entry(node, struct rds_mr, r_rb_node);
		if (mr->r_trans == rs->rs_transport)
			mr->r_invalidate = 0;
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		/* Drop the lock: destroying the MR may call into the
		 * transport, which must not run under the spinlock. */
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		rds_destroy_mr(mr);
		rds_mr_put(mr);
		spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (rs->rs_transport && rs->rs_transport->flush_mrs)
		rs->rs_transport->flush_mrs();
}
/*
 * Helper function to pin user pages.
 */
static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
			struct page **pages, int write)
{
	int ret;

	ret = get_user_pages_fast(user_addr, nr_pages, write, pages);

	/* All-or-nothing: if only some pages were pinned, release them and
	 * report failure rather than returning a short count. */
	if (ret >= 0 && ret < nr_pages) {
		while (ret--)
			put_page(pages[ret]);
		ret = -EFAULT;
	}

	return ret;
}
/* Pin a user buffer, hand it to the transport as a memory region, and
 * publish the resulting <r_key, offset> cookie.  On success the MR is
 * inserted into the socket's key tree (which holds a reference); the
 * caller optionally receives the cookie and/or a referenced MR. */
static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
				u64 *cookie_ret, struct rds_mr **mr_ret)
{
	struct rds_mr *mr = NULL, *found;
	unsigned int nr_pages;
	struct page **pages = NULL;
	struct scatterlist *sg;
	void *trans_private;
	unsigned long flags;
	rds_rdma_cookie_t cookie;
	unsigned int nents;
	long i;
	int ret;

	if (rs->rs_bound_addr == 0 || !rs->rs_transport) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	if (!rs->rs_transport->get_mr) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	nr_pages = rds_pages_in_vec(&args->vec);
	if (nr_pages == 0) {
		ret = -EINVAL;
		goto out;
	}

	/* Restrict the size of mr irrespective of underlying transport
	 * To account for unaligned mr regions, subtract one from nr_pages
	 */
	if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
		ret = -EMSGSIZE;
		goto out;
	}

	rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
		args->vec.addr, args->vec.bytes, nr_pages);

	/* XXX clamp nr_pages to limit the size of this alloc? */
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	mr = kzalloc(sizeof(struct rds_mr), GFP_KERNEL);
	if (!mr) {
		ret = -ENOMEM;
		goto out;
	}

	refcount_set(&mr->r_refcount, 1);
	RB_CLEAR_NODE(&mr->r_rb_node);
	mr->r_trans = rs->rs_transport;
	mr->r_sock = rs;

	if (args->flags & RDS_RDMA_USE_ONCE)
		mr->r_use_once = 1;
	if (args->flags & RDS_RDMA_INVALIDATE)
		mr->r_invalidate = 1;
	if (args->flags & RDS_RDMA_READWRITE)
		mr->r_write = 1;

	/*
	 * Pin the pages that make up the user buffer and transfer the page
	 * pointers to the mr's sg array.  We check to see if we've mapped
	 * the whole region after transferring the partial page references
	 * to the sg array so that we can have one page ref cleanup path.
	 *
	 * For now we have no flag that tells us whether the mapping is
	 * r/o or r/w. We need to assume r/w, or we'll do a lot of RDMA to
	 * the zero page.
	 */
	ret = rds_pin_pages(args->vec.addr, nr_pages, pages, 1);
	if (ret < 0)
		goto out;

	nents = ret;
	sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		ret = -ENOMEM;
		goto out;
	}
	WARN_ON(!nents);
	sg_init_table(sg, nents);

	/* Stick all pages into the scatterlist */
	for (i = 0 ; i < nents; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	rdsdebug("RDS: trans_private nents is %u\n", nents);

	/* Obtain a transport specific MR. If this succeeds, the
	 * s/g list is now owned by the MR.
	 * Note that dma_map() implies that pending writes are
	 * flushed to RAM, so no dma_sync is needed here. */
	trans_private = rs->rs_transport->get_mr(sg, nents, rs,
						 &mr->r_key);

	if (IS_ERR(trans_private)) {
		/* get_mr failed, so we still own the sg list and its pages. */
		for (i = 0 ; i < nents; i++)
			put_page(sg_page(&sg[i]));
		kfree(sg);
		ret = PTR_ERR(trans_private);
		goto out;
	}

	mr->r_trans_private = trans_private;

	rdsdebug("RDS: get_mr put_user key is %x cookie_addr %p\n",
	       mr->r_key, (void *)(unsigned long) args->cookie_addr);

	/* The user may pass us an unaligned address, but we can only
	 * map page aligned regions. So we keep the offset, and build
	 * a 64bit cookie containing <R_Key, offset> and pass that
	 * around. */
	cookie = rds_rdma_make_cookie(mr->r_key, args->vec.addr & ~PAGE_MASK);
	if (cookie_ret)
		*cookie_ret = cookie;

	if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
		ret = -EFAULT;
		goto out;
	}

	/* Inserting the new MR into the rbtree bumps its
	 * reference count. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	found = rds_mr_tree_walk(&rs->rs_rdma_keys, mr->r_key, mr);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	BUG_ON(found && found != mr);

	rdsdebug("RDS: get_mr key is %x\n", mr->r_key);
	if (mr_ret) {
		refcount_inc(&mr->r_refcount);
		*mr_ret = mr;
	}

	ret = 0;
out:
	kfree(pages);
	/* Drop our local reference; the tree (and mr_ret) hold their own. */
	if (mr)
		rds_mr_put(mr);
	return ret;
}
/* RDS_GET_MR setsockopt handler: copy the args from userspace and map. */
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_args args;

	if (optlen != sizeof(struct rds_get_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_args __user *)optval,
			   sizeof(struct rds_get_mr_args)))
		return -EFAULT;

	return __rds_rdma_map(rs, &args, NULL, NULL);
}
/* RDS_GET_MR_FOR_DEST setsockopt handler: same mapping as rds_get_mr(),
 * after repacking the dest-flavoured args into rds_get_mr_args. */
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_get_mr_for_dest_args args;
	struct rds_get_mr_args new_args;

	if (optlen != sizeof(struct rds_get_mr_for_dest_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_get_mr_for_dest_args __user *)optval,
			   sizeof(struct rds_get_mr_for_dest_args)))
		return -EFAULT;

	/*
	 * Initially, just behave like get_mr().
	 * TODO: Implement get_mr as wrapper around this
	 *	 and deprecate it.
	 */
	new_args.vec = args.vec;
	new_args.cookie_addr = args.cookie_addr;
	new_args.flags = args.flags;

	return __rds_rdma_map(rs, &new_args, NULL, NULL);
}
/*
 * Free the MR indicated by the given R_Key
 */
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen)
{
	struct rds_free_mr_args args;
	struct rds_mr *mr;
	unsigned long flags;

	if (optlen != sizeof(struct rds_free_mr_args))
		return -EINVAL;

	if (copy_from_user(&args, (struct rds_free_mr_args __user *)optval,
			   sizeof(struct rds_free_mr_args)))
		return -EFAULT;

	/* Special case - a null cookie means flush all unused MRs */
	if (args.cookie == 0) {
		if (!rs->rs_transport || !rs->rs_transport->flush_mrs)
			return -EINVAL;
		rs->rs_transport->flush_mrs();
		return 0;
	}

	/* Look up the MR given its R_key and remove it from the rbtree
	 * so nobody else finds it.
	 * This should also prevent races with rds_rdma_unuse.
	 */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, rds_rdma_cookie_key(args.cookie), NULL);
	if (mr) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		if (args.flags & RDS_RDMA_INVALIDATE)
			mr->r_invalidate = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (!mr)
		return -EINVAL;

	/*
	 * call rds_destroy_mr() ourselves so that we're sure it's done by the time
	 * we return.  If we let rds_mr_put() do it it might not happen until
	 * someone else drops their ref.
	 */
	rds_destroy_mr(mr);
	rds_mr_put(mr);
	return 0;
}
/*
 * This is called when we receive an extension header that
 * tells us this MR was used. It allows us to implement
 * use_once semantics
 */
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force)
{
	struct rds_mr *mr;
	unsigned long flags;
	int zot_me = 0;

	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr) {
		pr_debug("rds: trying to unuse MR with unknown r_key %u!\n",
			 r_key);
		spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);
		return;
	}

	/* Use-once MRs (or a forced unuse) are removed from the tree here;
	 * the actual teardown happens below, outside the lock. */
	if (mr->r_use_once || force) {
		rb_erase(&mr->r_rb_node, &rs->rs_rdma_keys);
		RB_CLEAR_NODE(&mr->r_rb_node);
		zot_me = 1;
	}
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	/* May have to issue a dma_sync on this memory region.
	 * Note we could avoid this if the operation was a RDMA READ,
	 * but at this point we can't tell. */
	if (mr->r_trans->sync_mr)
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_FROM_DEVICE);

	/* If the MR was marked as invalidate, this will
	 * trigger an async flush. */
	if (zot_me) {
		rds_destroy_mr(mr);
		rds_mr_put(mr);
	}
}
/* Release the resources held by an RDMA op: unpin (and possibly dirty)
 * each page in its scatterlist and free its notifier. */
void rds_rdma_free_op(struct rm_rdma_op *ro)
{
	unsigned int i;

	for (i = 0; i < ro->op_nents; i++) {
		struct page *page = sg_page(&ro->op_sg[i]);

		/* Mark page dirty if it was possibly modified, which
		 * is the case for a RDMA_READ which copies from remote
		 * to local memory */
		if (!ro->op_write) {
			WARN_ON(!page->mapping && irqs_disabled());
			set_page_dirty(page);
		}
		put_page(page);
	}

	kfree(ro->op_notifier);
	ro->op_notifier = NULL;
	ro->op_active = 0;
}
/* Release the single page and notifier held by an atomic op. */
void rds_atomic_free_op(struct rm_atomic_op *ao)
{
	struct page *page = sg_page(ao->op_sg);

	/* Mark page dirty if it was possibly modified, which
	 * is the case for a RDMA_READ which copies from remote
	 * to local memory */
	set_page_dirty(page);
	put_page(page);

	kfree(ao->op_notifier);
	ao->op_notifier = NULL;
	ao->op_active = 0;
}
/*
 * Count the number of pages needed to describe an incoming iovec array.
 * Returns -EINVAL if any vec is invalid or the total would overflow.
 */
static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs)
{
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	/* figure out the number of pages in the vector */
	for (i = 0; i < nr_iovecs; i++) {
		nr_pages = rds_pages_in_vec(&iov[i]);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages;
}
/* Compute the extra scatterlist bytes a message needs for the RDMA args'
 * local iovec array, copying each iovec from userspace.  Returns the
 * byte count, or -EFAULT/-EINVAL on bad input. */
int rds_rdma_extra_size(struct rds_rdma_args *args)
{
	struct rds_iovec vec;
	struct rds_iovec __user *local_vec;
	int tot_pages = 0;
	unsigned int nr_pages;
	unsigned int i;

	local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr;

	/* figure out the number of pages in the vector */
	for (i = 0; i < args->nr_local; i++) {
		if (copy_from_user(&vec, &local_vec[i],
				   sizeof(struct rds_iovec)))
			return -EFAULT;

		nr_pages = rds_pages_in_vec(&vec);
		if (nr_pages == 0)
			return -EINVAL;

		tot_pages += nr_pages;

		/*
		 * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1,
		 * so tot_pages cannot overflow without first going negative.
		 */
		if (tot_pages < 0)
			return -EINVAL;
	}

	return tot_pages * sizeof(struct scatterlist);
}
/*
 * The application asks for a RDMA transfer.
 * Extract all arguments and set up the rdma_op
 */
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	struct rds_rdma_args *args;
	struct rm_rdma_op *op = &rm->rdma;
	int nr_pages;
	unsigned int nr_bytes;
	struct page **pages = NULL;
	struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack;
	int iov_size;
	unsigned int i, j;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_rdma_args))
	    || rm->rdma.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	if (rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out_ret;
	}

	if (args->nr_local > UIO_MAXIOV) {
		ret = -EMSGSIZE;
		goto out_ret;
	}

	/* Check whether to allocate the iovec area */
	iov_size = args->nr_local * sizeof(struct rds_iovec);
	if (args->nr_local > UIO_FASTIOV) {
		iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL);
		if (!iovs) {
			ret = -ENOMEM;
			goto out_ret;
		}
	}

	if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) {
		ret = -EFAULT;
		goto out;
	}

	nr_pages = rds_rdma_pages(iovs, args->nr_local);
	if (nr_pages < 0) {
		ret = -EINVAL;
		goto out;
	}

	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}

	op->op_write = !!(args->flags & RDS_RDMA_READWRITE);
	op->op_fence = !!(args->flags & RDS_RDMA_FENCE);
	op->op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	op->op_silent = !!(args->flags & RDS_RDMA_SILENT);
	op->op_active = 1;
	op->op_recverr = rs->rs_recverr;
	WARN_ON(!nr_pages);
	op->op_sg = rds_message_alloc_sgs(rm, nr_pages);
	if (!op->op_sg) {
		ret = -ENOMEM;
		goto out;
	}

	if (op->op_notify || op->op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		op->op_notifier = kmalloc(sizeof(struct rds_notifier), GFP_KERNEL);
		if (!op->op_notifier) {
			ret = -ENOMEM;
			goto out;
		}
		op->op_notifier->n_user_token = args->user_token;
		op->op_notifier->n_status = RDS_RDMA_SUCCESS;

		/* Enable rmda notification on data operation for composite
		 * rds messages and make sure notification is enabled only
		 * for the data operation which follows it so that application
		 * gets notified only after full message gets delivered.
		 */
		if (rm->data.op_sg) {
			rm->rdma.op_notify = 0;
			rm->data.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
		}
	}

	/* The cookie contains the R_Key of the remote memory region, and
	 * optionally an offset into it. This is how we implement RDMA into
	 * unaligned memory.
	 * When setting up the RDMA, we need to add that offset to the
	 * destination address (which is really an offset into the MR)
	 * FIXME: We may want to move this into ib_rdma.c
	 */
	op->op_rkey = rds_rdma_cookie_key(args->cookie);
	op->op_remote_addr = args->remote_vec.addr + rds_rdma_cookie_offset(args->cookie);

	nr_bytes = 0;

	rdsdebug("RDS: rdma prepare nr_local %llu rva %llx rkey %x\n",
	       (unsigned long long)args->nr_local,
	       (unsigned long long)args->remote_vec.addr,
	       op->op_rkey);

	for (i = 0; i < args->nr_local; i++) {
		struct rds_iovec *iov = &iovs[i];
		/* don't need to check, rds_rdma_pages() verified nr will be +nonzero */
		unsigned int nr = rds_pages_in_vec(iov);

		rs->rs_user_addr = iov->addr;
		rs->rs_user_bytes = iov->bytes;

		/* If it's a WRITE operation, we want to pin the pages for reading.
		 * If it's a READ operation, we need to pin the pages for writing.
		 */
		ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write);
		if (ret < 0)
			goto out;
		else
			ret = 0;

		rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n",
			 nr_bytes, nr, iov->bytes, iov->addr);

		nr_bytes += iov->bytes;

		/* Chop this iovec into per-page scatterlist entries, honouring
		 * the sub-page offset of the first page. */
		for (j = 0; j < nr; j++) {
			unsigned int offset = iov->addr & ~PAGE_MASK;
			struct scatterlist *sg;

			sg = &op->op_sg[op->op_nents + j];
			sg_set_page(sg, pages[j],
					min_t(unsigned int, iov->bytes, PAGE_SIZE - offset),
					offset);

			rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n",
			       sg->offset, sg->length, iov->addr, iov->bytes);

			iov->addr += sg->length;
			iov->bytes -= sg->length;
		}

		op->op_nents += nr;
	}

	if (nr_bytes > args->remote_vec.bytes) {
		rdsdebug("RDS nr_bytes %u remote_bytes %u do not match\n",
				nr_bytes,
				(unsigned int) args->remote_vec.bytes);
		ret = -EINVAL;
		goto out;
	}
	op->op_bytes = nr_bytes;

out:
	if (iovs != iovstack)
		sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size);
	kfree(pages);
out_ret:
	/* On any failure, rds_rdma_free_op() unpins whatever op_sg holds. */
	if (ret)
		rds_rdma_free_op(op);
	else
		rds_stats_inc(s_send_rdma);

	return ret;
}
/*
 * The application wants us to pass an RDMA destination (aka MR)
 * to the remote
 */
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	unsigned long flags;
	struct rds_mr *mr;
	u32 r_key;
	int err = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(rds_rdma_cookie_t)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	memcpy(&rm->m_rdma_cookie, CMSG_DATA(cmsg), sizeof(rm->m_rdma_cookie));

	/* We are reusing a previously mapped MR here. Most likely, the
	 * application has written to the buffer, so we need to explicitly
	 * flush those writes to RAM. Otherwise the HCA may not see them
	 * when doing a DMA from that buffer.
	 */
	r_key = rds_rdma_cookie_key(rm->m_rdma_cookie);

	/* Take a reference on the MR under the lock so it cannot be
	 * destroyed while the message holds it. */
	spin_lock_irqsave(&rs->rs_rdma_lock, flags);
	mr = rds_mr_tree_walk(&rs->rs_rdma_keys, r_key, NULL);
	if (!mr)
		err = -EINVAL;	/* invalid r_key */
	else
		refcount_inc(&mr->r_refcount);
	spin_unlock_irqrestore(&rs->rs_rdma_lock, flags);

	if (mr) {
		mr->r_trans->sync_mr(mr->r_trans_private, DMA_TO_DEVICE);
		rm->rdma.op_rdma_mr = mr;
	}
	return err;
}
/*
 * The application passes us an address range it wants to enable RDMA
 * to/from. We map the area, and save the <R_Key,offset> pair
 * in rm->m_rdma_cookie. This causes it to be sent along to the peer
 * in an extension header.
 */
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
			  struct cmsghdr *cmsg)
{
	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_get_mr_args)) ||
	    rm->m_rdma_cookie != 0)
		return -EINVAL;

	return __rds_rdma_map(rs, CMSG_DATA(cmsg), &rm->m_rdma_cookie, &rm->rdma.op_rdma_mr);
}
/*
 * Fill in rds_message for an atomic request.
 */
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg)
{
	struct page *page = NULL;
	struct rds_atomic_args *args;
	int ret = 0;

	if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct rds_atomic_args))
	    || rm->atomic.op_active)
		return -EINVAL;

	args = CMSG_DATA(cmsg);

	/* Nonmasked & masked cmsg ops converted to masked hw ops */
	switch (cmsg->cmsg_type) {
	case RDS_CMSG_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = 0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_FADD:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_FADD;
		rm->atomic.op_m_fadd.add = args->m_fadd.add;
		rm->atomic.op_m_fadd.nocarry_mask = args->m_fadd.nocarry_mask;
		break;
	case RDS_CMSG_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->cswp.compare;
		rm->atomic.op_m_cswp.swap = args->cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = ~0;
		rm->atomic.op_m_cswp.swap_mask = ~0;
		break;
	case RDS_CMSG_MASKED_ATOMIC_CSWP:
		rm->atomic.op_type = RDS_ATOMIC_TYPE_CSWP;
		rm->atomic.op_m_cswp.compare = args->m_cswp.compare;
		rm->atomic.op_m_cswp.swap = args->m_cswp.swap;
		rm->atomic.op_m_cswp.compare_mask = args->m_cswp.compare_mask;
		rm->atomic.op_m_cswp.swap_mask = args->m_cswp.swap_mask;
		break;
	default:
		BUG(); /* should never happen */
	}

	rm->atomic.op_notify = !!(args->flags & RDS_RDMA_NOTIFY_ME);
	rm->atomic.op_silent = !!(args->flags & RDS_RDMA_SILENT);
	rm->atomic.op_active = 1;
	rm->atomic.op_recverr = rs->rs_recverr;
	rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1);
	if (!rm->atomic.op_sg) {
		ret = -ENOMEM;
		goto err;
	}

	/* verify 8 byte-aligned */
	if (args->local_addr & 0x7) {
		ret = -EFAULT;
		goto err;
	}

	/* Pin the single user page that will receive the atomic result. */
	ret = rds_pin_pages(args->local_addr, 1, &page, 1);
	if (ret != 1)
		goto err;
	ret = 0;

	sg_set_page(rm->atomic.op_sg, page, 8, offset_in_page(args->local_addr));

	if (rm->atomic.op_notify || rm->atomic.op_recverr) {
		/* We allocate an uninitialized notifier here, because
		 * we don't want to do that in the completion handler. We
		 * would have to use GFP_ATOMIC there, and don't want to deal
		 * with failed allocations.
		 */
		rm->atomic.op_notifier = kmalloc(sizeof(*rm->atomic.op_notifier), GFP_KERNEL);
		if (!rm->atomic.op_notifier) {
			ret = -ENOMEM;
			goto err;
		}

		rm->atomic.op_notifier->n_user_token = args->user_token;
		rm->atomic.op_notifier->n_status = RDS_RDMA_SUCCESS;
	}

	rm->atomic.op_rkey = rds_rdma_cookie_key(args->cookie);
	rm->atomic.op_remote_addr = args->remote_addr + rds_rdma_cookie_offset(args->cookie);

	return ret;
err:
	if (page)
		put_page(page);
	kfree(rm->atomic.op_notifier);

	return ret;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_635_0 |
crossvul-cpp_data_good_4808_1 | /*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "git2.h"
#include "git2/odb_backend.h"
#include "smart.h"
#include "refs.h"
#include "repository.h"
#include "push.h"
#include "pack-objects.h"
#include "remote.h"
#include "util.h"
#define NETWORK_XFER_THRESHOLD (100*1024)
/* The minimal interval between progress updates (in seconds). */
#define MIN_PROGRESS_UPDATE_INTERVAL 0.5
/*
 * Read pkt-lines from the remote until `flushes` flush-pkts have been
 * seen, storing each parsed packet (ref advertisements) in t->refs.
 *
 * Returns the number of flush-pkts consumed on success, GIT_EEOF on a
 * premature hangup, or a negative error code.
 */
int git_smart__store_refs(transport_smart *t, int flushes)
{
	gitno_buffer *buf = &t->buffer;
	git_vector *refs = &t->refs;
	int error, flush = 0, recvd;
	const char *line_end = NULL;
	git_pkt *pkt = NULL;
	size_t i;

	/* Clear existing refs in case git_remote_connect() is called again
	 * after git_remote_disconnect().
	 */
	git_vector_foreach(refs, i, pkt) {
		git_pkt_free(pkt);
	}
	git_vector_clear(refs);
	pkt = NULL;

	do {
		if (buf->offset > 0)
			error = git_pkt_parse_line(&pkt, buf->data, &line_end, buf->offset);
		else
			error = GIT_EBUFS;

		if (error < 0 && error != GIT_EBUFS)
			return error;

		if (error == GIT_EBUFS) {
			/* Not enough buffered data for a full pkt-line: read more */
			if ((recvd = gitno_recv(buf)) < 0)
				return recvd;

			if (recvd == 0) {
				giterr_set(GITERR_NET, "early EOF");
				return GIT_EEOF;
			}

			continue;
		}

		gitno_consume(buf, line_end);

		if (pkt->type == GIT_PKT_ERR) {
			giterr_set(GITERR_NET, "Remote error: %s", ((git_pkt_err *)pkt)->error);
			git__free(pkt);
			return -1;
		}

		if (pkt->type != GIT_PKT_FLUSH && git_vector_insert(refs, pkt) < 0) {
			/* the vector did not take ownership on failure; the
			 * original code leaked the packet here */
			git_pkt_free(pkt);
			return -1;
		}

		if (pkt->type == GIT_PKT_FLUSH) {
			flush++;
			git_pkt_free(pkt);
		}
	} while (flush < flushes);

	return flush;
}
/*
 * Parse one "symref=SRC:DST" capability entry starting at `ptr`,
 * store it in `symrefs` as a git_refspec, and set *out to the end of
 * the consumed token. Returns 0 on success, -1 on a malformed entry,
 * or a negative error code on allocation failure.
 */
static int append_symref(const char **out, git_vector *symrefs, const char *ptr)
{
	int error;
	const char *end;
	git_buf buf = GIT_BUF_INIT;
	git_refspec *mapping = NULL;

	ptr += strlen(GIT_CAP_SYMREF);
	if (*ptr != '=')
		goto on_invalid;

	ptr++;
	/* the entry runs to the next space, or to end-of-string */
	if (!(end = strchr(ptr, ' ')) &&
	    !(end = strchr(ptr, '\0')))
		goto on_invalid;

	if ((error = git_buf_put(&buf, ptr, end - ptr)) < 0) {
		/* free any partial allocation; the original leaked it here */
		git_buf_free(&buf);
		return error;
	}

	/* symref mapping has refspec format */
	mapping = git__calloc(1, sizeof(git_refspec));
	GITERR_CHECK_ALLOC(mapping);

	error = git_refspec__parse(mapping, git_buf_cstr(&buf), true);
	git_buf_free(&buf);

	/* if the error isn't OOM, then it's a parse error; let's use a nicer message */
	if (error < 0) {
		if (giterr_last()->klass != GITERR_NOMEMORY)
			goto on_invalid;

		git__free(mapping);
		return error;
	}

	if ((error = git_vector_insert(symrefs, mapping)) < 0)
		return error;

	*out = end;
	return 0;

on_invalid:
	giterr_set(GITERR_NET, "remote sent invalid symref");
	git_refspec__free(mapping);
	git__free(mapping);
	return -1;
}
/*
 * Scan the space-separated capability string announced on the first
 * ref packet, setting the corresponding flags in `caps`. "symref="
 * entries are collected into `symrefs`; unknown capabilities are
 * skipped. Returns 0, or a negative error from append_symref().
 */
int git_smart__detect_caps(git_pkt_ref *pkt, transport_smart_caps *caps, git_vector *symrefs)
{
	const char *ptr;

	/* No refs or capabilites, odd but not a problem */
	if (pkt == NULL || pkt->capabilities == NULL)
		return 0;

	ptr = pkt->capabilities;
	while (ptr != NULL && *ptr != '\0') {
		if (*ptr == ' ')
			ptr++;

		if (!git__prefixcmp(ptr, GIT_CAP_OFS_DELTA)) {
			caps->common = caps->ofs_delta = 1;
			ptr += strlen(GIT_CAP_OFS_DELTA);
			continue;
		}

		/* Keep multi_ack_detailed before multi_ack */
		if (!git__prefixcmp(ptr, GIT_CAP_MULTI_ACK_DETAILED)) {
			caps->common = caps->multi_ack_detailed = 1;
			ptr += strlen(GIT_CAP_MULTI_ACK_DETAILED);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_MULTI_ACK)) {
			caps->common = caps->multi_ack = 1;
			ptr += strlen(GIT_CAP_MULTI_ACK);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_INCLUDE_TAG)) {
			caps->common = caps->include_tag = 1;
			ptr += strlen(GIT_CAP_INCLUDE_TAG);
			continue;
		}

		/* Keep side-band check after side-band-64k */
		if (!git__prefixcmp(ptr, GIT_CAP_SIDE_BAND_64K)) {
			caps->common = caps->side_band_64k = 1;
			ptr += strlen(GIT_CAP_SIDE_BAND_64K);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_SIDE_BAND)) {
			caps->common = caps->side_band = 1;
			ptr += strlen(GIT_CAP_SIDE_BAND);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_DELETE_REFS)) {
			caps->common = caps->delete_refs = 1;
			ptr += strlen(GIT_CAP_DELETE_REFS);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_THIN_PACK)) {
			caps->common = caps->thin_pack = 1;
			ptr += strlen(GIT_CAP_THIN_PACK);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_SYMREF)) {
			int error;

			if ((error = append_symref(&ptr, symrefs, ptr)) < 0)
				return error;

			continue;
		}

		/* We don't know this capability, so skip it */
		ptr = strchr(ptr, ' ');
	}

	return 0;
}
/*
 * Receive and parse exactly one pkt-line from the buffered connection,
 * reading more data from the network as needed.
 *
 * On success returns the packet's type (>= 0); the packet itself is
 * stored in *out (caller owns it) or freed immediately when out is
 * NULL. Returns GIT_EEOF on remote hangup or a negative error code.
 */
static int recv_pkt(git_pkt **out, gitno_buffer *buf)
{
	const char *ptr = buf->data, *line_end = ptr;
	git_pkt *pkt = NULL;
	int pkt_type, error = 0, ret;

	do {
		if (buf->offset > 0)
			error = git_pkt_parse_line(&pkt, ptr, &line_end, buf->offset);
		else
			error = GIT_EBUFS;

		if (error == 0)
			break; /* return the pkt */

		if (error < 0 && error != GIT_EBUFS)
			return error;

		/* GIT_EBUFS: partial pkt-line, pull more bytes off the wire */
		if ((ret = gitno_recv(buf)) < 0) {
			return ret;
		} else if (ret == 0) {
			giterr_set(GITERR_NET, "early EOF");
			return GIT_EEOF;
		}
	} while (error);

	gitno_consume(buf, line_end);

	pkt_type = pkt->type;
	if (out != NULL)
		*out = pkt;
	else
		git__free(pkt);

	return pkt_type;
}
/*
 * Collect consecutive ACK packets from the remote into t->common.
 * Stops (and returns 0) on the first non-ACK packet, which is
 * discarded. Returns a negative error code on receive or insert
 * failure.
 */
static int store_common(transport_smart *t)
{
	gitno_buffer *buf = &t->buffer;
	git_pkt *pkt = NULL;
	int error;

	for (;;) {
		if ((error = recv_pkt(&pkt, buf)) < 0)
			return error;

		if (pkt->type != GIT_PKT_ACK) {
			git__free(pkt);
			return 0;
		}

		if (git_vector_insert(&t->common, pkt) < 0)
			return -1;
	}

	return 0;
}
/*
 * Create a time-sorted revision walker seeded with every direct
 * (non-symbolic, non-tag) reference in `repo`, for generating "have"
 * lines during negotiation. On success stores the walker in *out;
 * the caller owns it. Returns 0 or a negative error code.
 */
static int fetch_setup_walk(git_revwalk **out, git_repository *repo)
{
	git_revwalk *walk = NULL;
	git_strarray refs;
	unsigned int i;
	git_reference *ref = NULL;	/* was uninitialized: freed as garbage on early error */
	int error;

	if ((error = git_reference_list(&refs, repo)) < 0)
		return error;

	if ((error = git_revwalk_new(&walk, repo)) < 0) {
		/* the original leaked the ref list on this path */
		git_strarray_free(&refs);
		return error;
	}

	git_revwalk_sorting(walk, GIT_SORT_TIME);

	for (i = 0; i < refs.count; ++i) {
		/* No tags */
		if (!git__prefixcmp(refs.strings[i], GIT_REFS_TAGS_DIR))
			continue;

		if ((error = git_reference_lookup(&ref, repo, refs.strings[i])) < 0)
			goto on_error;

		/* symbolic refs have no direct target to push; the original
		 * `continue`d here without freeing the reference */
		if (git_reference_type(ref) != GIT_REF_SYMBOLIC) {
			if ((error = git_revwalk_push(walk, git_reference_target(ref))) < 0)
				goto on_error;
		}

		git_reference_free(ref);
		ref = NULL;	/* avoid a stale double-free on a later error */
	}

	git_strarray_free(&refs);
	*out = walk;
	return 0;

on_error:
	git_revwalk_free(walk);
	git_reference_free(ref);
	git_strarray_free(&refs);
	return error;
}
/*
 * Drain multi-ack responses: keep reading packets until a NAK or a
 * final ACK (one whose status is neither "continue" nor "common") is
 * seen. Returns 0 on completion or a negative error from recv_pkt().
 */
static int wait_while_ack(gitno_buffer *buf)
{
	int error;
	git_pkt_ack *pkt = NULL;

	while (1) {
		/* free the previous iteration's packet (no-op on first pass) */
		git__free(pkt);

		if ((error = recv_pkt((git_pkt **)&pkt, buf)) < 0)
			return error;

		if (pkt->type == GIT_PKT_NAK)
			break;

		/* a definitive ACK terminates the ACK stream */
		if (pkt->type == GIT_PKT_ACK &&
		    (pkt->status != GIT_ACK_CONTINUE &&
		     pkt->status != GIT_ACK_COMMON)) {
			git__free(pkt);
			return 0;
		}
	}

	git__free(pkt);
	return 0;
}
/*
 * Run the fetch negotiation: send our "want" list, then walk local
 * history emitting "have" lines in batches of 20 (up to 256 total),
 * stopping early once the server ACKs a common ancestor. Finishes by
 * sending "done" and draining the server's final ACK/NAK.
 *
 * Returns 0 on success, GIT_EUSER if cancelled, or a negative error.
 */
int git_smart__negotiate_fetch(git_transport *transport, git_repository *repo, const git_remote_head * const *wants, size_t count)
{
	transport_smart *t = (transport_smart *)transport;
	gitno_buffer *buf = &t->buffer;
	git_buf data = GIT_BUF_INIT;
	git_revwalk *walk = NULL;
	int error = -1, pkt_type;
	unsigned int i;
	git_oid oid;

	if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
		return error;

	if ((error = fetch_setup_walk(&walk, repo)) < 0)
		goto on_error;

	/*
	 * Our support for ACK extensions is simply to parse them. On
	 * the first ACK we will accept that as enough common
	 * objects. We give up if we haven't found an answer in the
	 * first 256 we send.
	 */
	i = 0;
	while (i < 256) {
		error = git_revwalk_next(&oid, walk);

		if (error < 0) {
			if (GIT_ITEROVER == error)
				break;

			goto on_error;
		}

		git_pkt_buffer_have(&oid, &data);
		i++;
		/* flush a batch to the server every 20 "have" lines */
		if (i % 20 == 0) {
			if (t->cancelled.val) {
				giterr_set(GITERR_NET, "The fetch was cancelled by the user");
				error = GIT_EUSER;
				goto on_error;
			}

			git_pkt_buffer_flush(&data);
			if (git_buf_oom(&data)) {
				error = -1;
				goto on_error;
			}

			if ((error = git_smart__negotiation_step(&t->parent, data.ptr, data.size)) < 0)
				goto on_error;

			git_buf_clear(&data);

			if (t->caps.multi_ack || t->caps.multi_ack_detailed) {
				/* multi-ack: collect every common-object ACK */
				if ((error = store_common(t)) < 0)
					goto on_error;
			} else {
				/* single-ack: the first ACK ends negotiation */
				pkt_type = recv_pkt(NULL, buf);

				if (pkt_type == GIT_PKT_ACK) {
					break;
				} else if (pkt_type == GIT_PKT_NAK) {
					continue;
				} else if (pkt_type < 0) {
					/* recv_pkt returned an error */
					error = pkt_type;
					goto on_error;
				} else {
					giterr_set(GITERR_NET, "Unexpected pkt type");
					error = -1;
					goto on_error;
				}
			}
		}

		if (t->common.length > 0)
			break;

		/* stateless RPC (http): each request must restate the wants
		 * and all previously-acknowledged haves */
		if (i % 20 == 0 && t->rpc) {
			git_pkt_ack *pkt;
			unsigned int j;

			if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
				goto on_error;

			git_vector_foreach(&t->common, j, pkt) {
				if ((error = git_pkt_buffer_have(&pkt->oid, &data)) < 0)
					goto on_error;
			}

			if (git_buf_oom(&data)) {
				error = -1;
				goto on_error;
			}
		}
	}

	/* Tell the other end that we're done negotiating */
	if (t->rpc && t->common.length > 0) {
		git_pkt_ack *pkt;
		unsigned int j;

		if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
			goto on_error;

		git_vector_foreach(&t->common, j, pkt) {
			if ((error = git_pkt_buffer_have(&pkt->oid, &data)) < 0)
				goto on_error;
		}

		if (git_buf_oom(&data)) {
			error = -1;
			goto on_error;
		}
	}

	if ((error = git_pkt_buffer_done(&data)) < 0)
		goto on_error;

	if (t->cancelled.val) {
		giterr_set(GITERR_NET, "The fetch was cancelled by the user");
		error = GIT_EUSER;
		goto on_error;
	}
	if ((error = git_smart__negotiation_step(&t->parent, data.ptr, data.size)) < 0)
		goto on_error;

	git_buf_free(&data);
	git_revwalk_free(walk);

	/* Now let's eat up whatever the server gives us */
	if (!t->caps.multi_ack && !t->caps.multi_ack_detailed) {
		pkt_type = recv_pkt(NULL, buf);

		if (pkt_type < 0) {
			return pkt_type;
		} else if (pkt_type != GIT_PKT_ACK && pkt_type != GIT_PKT_NAK) {
			giterr_set(GITERR_NET, "Unexpected pkt type");
			return -1;
		}
	} else {
		error = wait_while_ack(buf);
	}

	return error;

on_error:
	git_revwalk_free(walk);
	git_buf_free(&data);
	return error;
}
/*
 * Consume the packfile when the remote does not support side-band:
 * everything on the wire is pack data, so stream the buffer contents
 * straight into the pack writer until the connection closes, then
 * commit the pack. Returns 0, GIT_EUSER if cancelled, or an error.
 */
static int no_sideband(transport_smart *t, struct git_odb_writepack *writepack, gitno_buffer *buf, git_transfer_progress *stats)
{
	int received;

	for (;;) {
		if (t->cancelled.val) {
			giterr_set(GITERR_NET, "The fetch was cancelled by the user");
			return GIT_EUSER;
		}

		/* feed whatever is currently buffered to the pack writer */
		if (writepack->append(writepack, buf->data, buf->offset, stats) < 0)
			return -1;

		gitno_consume_n(buf, buf->offset);

		received = gitno_recv(buf);

		if (received < 0)
			return received;

		if (received == 0)
			break; /* remote closed the connection: pack is complete */
	}

	if (writepack->commit(writepack, stats) < 0)
		return -1;

	return 0;
}
/* State threaded through network_packetsize() so that raw byte counts
 * from the transport can be folded into the user's progress callback. */
struct network_packetsize_payload
{
	git_transfer_progress_cb callback;	/* user progress callback */
	void *payload;				/* opaque data passed to callback */
	git_transfer_progress *stats;		/* running transfer statistics */
	size_t last_fired_bytes;		/* received_bytes at last callback invocation */
};
/*
 * Transport packet-size hook: accumulate received byte counts and
 * invoke the user's progress callback once more than
 * NETWORK_XFER_THRESHOLD bytes have arrived since the last report.
 * Returns GIT_EUSER if the callback asks to abort, 0 otherwise.
 */
static int network_packetsize(size_t received, void *payload)
{
	struct network_packetsize_payload *npp = payload;
	size_t since_last;

	/* Accumulate bytes */
	npp->stats->received_bytes += received;

	/* Fire notification if the threshold is reached */
	since_last = npp->stats->received_bytes - npp->last_fired_bytes;

	if (since_last > NETWORK_XFER_THRESHOLD) {
		npp->last_fired_bytes = npp->stats->received_bytes;

		if (npp->callback(npp->stats, npp->payload) != 0)
			return GIT_EUSER;
	}

	return 0;
}
/*
 * Download the packfile after negotiation, writing it into the
 * repository's object database. With side-band capability the stream
 * is demultiplexed into data/progress packets; without it the raw
 * bytes are fed directly to the pack writer. Progress is reported via
 * `transfer_progress_cb` when provided.
 *
 * Returns 0 on success, GIT_EUSER if cancelled, or a negative error.
 */
int git_smart__download_pack(
	git_transport *transport,
	git_repository *repo,
	git_transfer_progress *stats,
	git_transfer_progress_cb transfer_progress_cb,
	void *progress_payload)
{
	transport_smart *t = (transport_smart *)transport;
	gitno_buffer *buf = &t->buffer;
	git_odb *odb;
	struct git_odb_writepack *writepack = NULL;
	int error = 0;
	struct network_packetsize_payload npp = {0};

	memset(stats, 0, sizeof(git_transfer_progress));

	if (transfer_progress_cb) {
		npp.callback = transfer_progress_cb;
		npp.payload = progress_payload;
		npp.stats = stats;
		t->packetsize_cb = &network_packetsize;
		t->packetsize_payload = &npp;

		/* We might have something in the buffer already from negotiate_fetch */
		if (t->buffer.offset > 0 && !t->cancelled.val)
			if (t->packetsize_cb(t->buffer.offset, t->packetsize_payload))
				git_atomic_set(&t->cancelled, 1);
	}

	if ((error = git_repository_odb__weakptr(&odb, repo)) < 0 ||
		((error = git_odb_write_pack(&writepack, odb, transfer_progress_cb, progress_payload)) != 0))
		goto done;

	/*
	 * If the remote doesn't support the side-band, we can feed
	 * the data directly to the pack writer. Otherwise, we need to
	 * check which one belongs there.
	 */
	if (!t->caps.side_band && !t->caps.side_band_64k) {
		error = no_sideband(t, writepack, buf, stats);
		goto done;
	}

	do {
		git_pkt *pkt = NULL;

		/* Check cancellation before network call */
		if (t->cancelled.val) {
			giterr_clear();
			error = GIT_EUSER;
			goto done;
		}

		if ((error = recv_pkt(&pkt, buf)) >= 0) {
			/* Check cancellation after network call */
			if (t->cancelled.val) {
				giterr_clear();
				error = GIT_EUSER;
			} else if (pkt->type == GIT_PKT_PROGRESS) {
				/* side-band channel 2: human-readable progress text */
				if (t->progress_cb) {
					git_pkt_progress *p = (git_pkt_progress *) pkt;
					error = t->progress_cb(p->data, p->len, t->message_cb_payload);
				}
			} else if (pkt->type == GIT_PKT_DATA) {
				/* side-band channel 1: actual packfile bytes */
				git_pkt_data *p = (git_pkt_data *) pkt;

				if (p->len)
					error = writepack->append(writepack, p->data, p->len, stats);
			} else if (pkt->type == GIT_PKT_FLUSH) {
				/* A flush indicates the end of the packfile */
				git__free(pkt);
				break;
			}
		}

		git__free(pkt);
		if (error < 0)
			goto done;

	} while (1);

	/*
	 * Trailing execution of transfer_progress_cb, if necessary...
	 * Only the callback through the npp datastructure currently
	 * updates the last_fired_bytes value. It is possible that
	 * progress has already been reported with the correct
	 * "received_bytes" value, but until (if?) this is unified
	 * then we will report progress again to be sure that the
	 * correct last received_bytes value is reported.
	 */
	if (npp.callback && npp.stats->received_bytes > npp.last_fired_bytes) {
		error = npp.callback(npp.stats, npp.payload);
		if (error != 0)
			goto done;
	}

	error = writepack->commit(writepack, stats);

done:
	if (writepack)
		writepack->free(writepack);
	if (transfer_progress_cb) {
		/* npp goes out of scope here: unhook the transport callbacks */
		t->packetsize_cb = NULL;
		t->packetsize_payload = NULL;
	}

	return error;
}
/*
 * Serialize the push command list into pkt-line format:
 * "<len><old-oid> <new-oid> <dst-ref>" per spec, with the capability
 * string (report-status, side-band-64k) appended NUL-separated to the
 * first line, terminated by a flush-pkt. Returns -1 on OOM, else 0.
 */
static int gen_pktline(git_buf *buf, git_push *push)
{
	push_spec *spec;
	size_t i, len;
	char old_id[GIT_OID_HEXSZ+1], new_id[GIT_OID_HEXSZ+1];

	old_id[GIT_OID_HEXSZ] = '\0'; new_id[GIT_OID_HEXSZ] = '\0';

	git_vector_foreach(&push->specs, i, spec) {
		/* 4-byte length prefix is included in `len`:
		 * 2 oids + 2 spaces + 4-digit length + '\n' = 2*HEXSZ + 7 */
		len = 2*GIT_OID_HEXSZ + 7 + strlen(spec->refspec.dst);

		if (i == 0) {
			++len; /* '\0' */
			if (push->report_status)
				len += strlen(GIT_CAP_REPORT_STATUS) + 1;
			len += strlen(GIT_CAP_SIDE_BAND_64K) + 1;
		}

		git_oid_fmt(old_id, &spec->roid);
		git_oid_fmt(new_id, &spec->loid);

		git_buf_printf(buf, "%04"PRIxZ"%s %s %s", len, old_id, new_id, spec->refspec.dst);

		if (i == 0) {
			git_buf_putc(buf, '\0');
			/* Core git always starts their capabilities string with a space */
			if (push->report_status) {
				git_buf_putc(buf, ' ');
				git_buf_printf(buf, GIT_CAP_REPORT_STATUS);
			}
			git_buf_putc(buf, ' ');
			git_buf_printf(buf, GIT_CAP_SIDE_BAND_64K);
		}

		git_buf_putc(buf, '\n');
	}

	git_buf_puts(buf, "0000");
	return git_buf_oom(buf) ? -1 : 0;
}
/*
 * Record one report-status packet from the remote into `push`:
 * "ok <ref>" and "ng <ref> <msg>" become push_status entries,
 * "unpack ..." sets push->unpack_ok. Returns GIT_ITEROVER on the
 * terminating flush-pkt, -1 on protocol error or OOM, else 0.
 * The caller retains ownership of `pkt`.
 */
static int add_push_report_pkt(git_push *push, git_pkt *pkt)
{
	push_status *status;

	switch (pkt->type) {
		case GIT_PKT_OK:
			status = git__calloc(1, sizeof(push_status));
			GITERR_CHECK_ALLOC(status);
			status->msg = NULL;	/* NULL msg marks success */
			status->ref = git__strdup(((git_pkt_ok *)pkt)->ref);
			if (!status->ref ||
				git_vector_insert(&push->status, status) < 0) {
				git_push_status_free(status);
				return -1;
			}
			break;
		case GIT_PKT_NG:
			status = git__calloc(1, sizeof(push_status));
			GITERR_CHECK_ALLOC(status);
			status->ref = git__strdup(((git_pkt_ng *)pkt)->ref);
			status->msg = git__strdup(((git_pkt_ng *)pkt)->msg);
			if (!status->ref || !status->msg ||
				git_vector_insert(&push->status, status) < 0) {
				git_push_status_free(status);
				return -1;
			}
			break;
		case GIT_PKT_UNPACK:
			push->unpack_ok = ((git_pkt_unpack *)pkt)->unpack_ok;
			break;
		case GIT_PKT_FLUSH:
			return GIT_ITEROVER;
		default:
			giterr_set(GITERR_NET, "report-status: protocol error");
			return -1;
	}

	return 0;
}
/*
 * Parse report-status pkt-lines carried inside a side-band DATA
 * packet. Inner pkt-lines may be split across side-band packets, so
 * partial data is carried over in `data_pkt_buf` between calls.
 * Returns 0 on success (including "need more data"), GIT_ITEROVER
 * once the inner flush-pkt is seen, or a negative error.
 */
static int add_push_report_sideband_pkt(git_push *push, git_pkt_data *data_pkt, git_buf *data_pkt_buf)
{
	git_pkt *pkt;
	const char *line, *line_end = NULL;
	size_t line_len;
	int error;
	int reading_from_buf = data_pkt_buf->size > 0;

	if (reading_from_buf) {
		/* We had an existing partial packet, so add the new
		 * packet to the buffer and parse the whole thing */
		git_buf_put(data_pkt_buf, data_pkt->data, data_pkt->len);
		line = data_pkt_buf->ptr;
		line_len = data_pkt_buf->size;
	}
	else {
		line = data_pkt->data;
		line_len = data_pkt->len;
	}

	while (line_len > 0) {
		error = git_pkt_parse_line(&pkt, line, &line_end, line_len);

		if (error == GIT_EBUFS) {
			/* Buffer the data when the inner packet is split
			 * across multiple sideband packets */
			if (!reading_from_buf)
				git_buf_put(data_pkt_buf, line, line_len);
			error = 0;
			goto done;
		}
		else if (error < 0)
			goto done;

		/* Advance in the buffer */
		line_len -= (line_end - line);
		line = line_end;

		error = add_push_report_pkt(push, pkt);

		git_pkt_free(pkt);

		/* GIT_ITEROVER (flush) still falls through so the
		 * buffered bytes below get consumed */
		if (error < 0 && error != GIT_ITEROVER)
			goto done;
	}

	error = 0;

done:
	if (reading_from_buf)
		git_buf_consume(data_pkt_buf, line_end);
	return error;
}
/*
 * Read the remote's post-push report. The report may arrive bare or
 * wrapped in side-band DATA packets (handled by
 * add_push_report_sideband_pkt); PROGRESS packets are forwarded to
 * the transport's progress callback and ERR packets abort. Stops at
 * the flush-pkt (surfaced as GIT_ITEROVER by add_push_report_pkt).
 * Returns 0 on success or a negative error code.
 */
static int parse_report(transport_smart *transport, git_push *push)
{
	git_pkt *pkt = NULL;
	const char *line_end = NULL;
	gitno_buffer *buf = &transport->buffer;
	int error, recvd;
	git_buf data_pkt_buf = GIT_BUF_INIT;

	for (;;) {
		if (buf->offset > 0)
			error = git_pkt_parse_line(&pkt, buf->data,
						   &line_end, buf->offset);
		else
			error = GIT_EBUFS;

		if (error < 0 && error != GIT_EBUFS) {
			error = -1;
			goto done;
		}

		if (error == GIT_EBUFS) {
			/* incomplete pkt-line: read more from the network */
			if ((recvd = gitno_recv(buf)) < 0) {
				error = recvd;
				goto done;
			}

			if (recvd == 0) {
				giterr_set(GITERR_NET, "early EOF");
				error = GIT_EEOF;
				goto done;
			}
			continue;
		}

		gitno_consume(buf, line_end);

		error = 0;

		switch (pkt->type) {
			case GIT_PKT_DATA:
				/* This is a sideband packet which contains other packets */
				error = add_push_report_sideband_pkt(push, (git_pkt_data *)pkt, &data_pkt_buf);
				break;
			case GIT_PKT_ERR:
				giterr_set(GITERR_NET, "report-status: Error reported: %s",
					((git_pkt_err *)pkt)->error);
				error = -1;
				break;
			case GIT_PKT_PROGRESS:
				if (transport->progress_cb) {
					git_pkt_progress *p = (git_pkt_progress *) pkt;
					error = transport->progress_cb(p->data, p->len, transport->message_cb_payload);
				}
				break;
			default:
				error = add_push_report_pkt(push, pkt);
				break;
		}

		git_pkt_free(pkt);

		/* add_push_report_pkt returns GIT_ITEROVER when it receives a flush */
		if (error == GIT_ITEROVER) {
			error = 0;
			if (data_pkt_buf.size > 0) {
				/* If there was data remaining in the pack data buffer,
				 * then the server sent a partial pkt-line */
				giterr_set(GITERR_NET, "Incomplete pack data pkt-line");
				error = GIT_ERROR;
			}
			goto done;
		}

		if (error < 0) {
			goto done;
		}
	}
done:
	git_buf_free(&data_pkt_buf);
	return error;
}
/*
 * Append a synthesized ref-announcement packet for a newly pushed ref
 * (destination name + local oid) to `refs`, so the cached ref list
 * reflects the push. Returns 0 on success, -1 on allocation/insert
 * failure (the packet is freed on failure).
 */
static int add_ref_from_push_spec(git_vector *refs, push_spec *push_spec)
{
	int error = -1;
	git_pkt_ref *pkt = git__calloc(1, sizeof(git_pkt_ref));
	GITERR_CHECK_ALLOC(pkt);

	pkt->type = GIT_PKT_REF;
	git_oid_cpy(&pkt->head.oid, &push_spec->loid);
	pkt->head.name = git__strdup(push_spec->refspec.dst);

	if (pkt->head.name != NULL && git_vector_insert(refs, pkt) == 0)
		error = 0;
	else
		git_pkt_free((git_pkt *)pkt);

	return error;
}
/*
 * Reconcile the cached remote ref list (`refs`) with the outcome of a
 * push: verify every push spec got a matching status entry, then
 * merge-join the sorted spec and ref lists, adding refs for created
 * branches, updating oids for changed ones, and dropping refs whose
 * new oid is zero (deletions). Returns 0 or -1 on protocol error/OOM.
 */
static int update_refs_from_report(
	git_vector *refs,
	git_vector *push_specs,
	git_vector *push_report)
{
	git_pkt_ref *ref;
	push_spec *push_spec;
	push_status *push_status;
	size_t i, j, refs_len;
	int cmp;

	/* For each push spec we sent to the server, we should have
	 * gotten back a status packet in the push report */
	if (push_specs->length != push_report->length) {
		giterr_set(GITERR_NET, "report-status: protocol error");
		return -1;
	}

	/* We require that push_specs be sorted with push_spec_rref_cmp,
	 * and that push_report be sorted with push_status_ref_cmp */
	git_vector_sort(push_specs);
	git_vector_sort(push_report);

	git_vector_foreach(push_specs, i, push_spec) {
		push_status = git_vector_get(push_report, i);

		/* For each push spec we sent to the server, we should have
		 * gotten back a status packet in the push report which matches */
		if (strcmp(push_spec->refspec.dst, push_status->ref)) {
			giterr_set(GITERR_NET, "report-status: protocol error");
			return -1;
		}
	}

	/* We require that refs be sorted with ref_name_cmp */
	git_vector_sort(refs);
	i = j = 0;
	refs_len = refs->length;

	/* Merge join push_specs with refs */
	while (i < push_specs->length && j < refs_len) {
		push_spec = git_vector_get(push_specs, i);
		push_status = git_vector_get(push_report, i);
		ref = git_vector_get(refs, j);

		cmp = strcmp(push_spec->refspec.dst, ref->head.name);

		/* Iterate appropriately */
		if (cmp <= 0) i++;
		if (cmp >= 0) j++;

		/* Add case: spec has no matching ref and the push succeeded
		 * (NULL msg == success) */
		if (cmp < 0 &&
			!push_status->msg &&
			add_ref_from_push_spec(refs, push_spec) < 0)
			return -1;

		/* Update case, delete case */
		if (cmp == 0 &&
			!push_status->msg)
			git_oid_cpy(&ref->head.oid, &push_spec->loid);
	}

	/* specs past the end of the ref list are all additions */
	for (; i < push_specs->length; i++) {
		push_spec = git_vector_get(push_specs, i);
		push_status = git_vector_get(push_report, i);

		/* Add case */
		if (!push_status->msg &&
			add_ref_from_push_spec(refs, push_spec) < 0)
			return -1;
	}

	/* Remove any refs which we updated to have a zero OID. */
	git_vector_rforeach(refs, i, ref) {
		if (git_oid_iszero(&ref->head.oid)) {
			git_vector_remove(refs, i);
			git_pkt_free((git_pkt *)ref);
		}
	}

	git_vector_sort(refs);

	return 0;
}
/* State passed to stream_thunk() while streaming the push packfile:
 * the output stream, the packbuilder (for object counts), and
 * throttling state for the user's transfer-progress callback. */
struct push_packbuilder_payload
{
	git_smart_subtransport_stream *stream;	/* where pack bytes are written */
	git_packbuilder *pb;			/* source packbuilder (nr_written/nr_objects) */
	git_push_transfer_progress cb;		/* optional user progress callback */
	void *cb_payload;			/* opaque data for cb */
	size_t last_bytes;			/* total bytes written so far */
	double last_progress_report_time;	/* timestamp of last cb invocation */
};
/*
 * git_packbuilder_foreach callback: write one chunk of packfile data
 * to the push stream and report progress, rate-limited to one
 * callback per MIN_PROGRESS_UPDATE_INTERVAL seconds. Returns 0 or a
 * negative error from the stream write / progress callback.
 */
static int stream_thunk(void *buf, size_t size, void *data)
{
	int error = 0;
	struct push_packbuilder_payload *payload = data;

	if ((error = payload->stream->write(payload->stream, (const char *)buf, size)) < 0)
		return error;

	if (payload->cb) {
		double current_time = git__timer();
		payload->last_bytes += size;

		/* throttle progress reports to avoid flooding the callback */
		if ((current_time - payload->last_progress_report_time) >= MIN_PROGRESS_UPDATE_INTERVAL) {
			payload->last_progress_report_time = current_time;
			error = payload->cb(payload->pb->nr_written, payload->pb->nr_objects, payload->last_bytes, payload->cb_payload);
		}
	}

	return error;
}
/*
 * Execute a push over the smart protocol: send the command pkt-lines,
 * stream the packfile (unless only deletions are requested), parse
 * the remote's report-status response, and update the cached ref list
 * to match. Returns 0 on success or a negative error code.
 */
int git_smart__push(git_transport *transport, git_push *push, const git_remote_callbacks *cbs)
{
	transport_smart *t = (transport_smart *)transport;
	struct push_packbuilder_payload packbuilder_payload = {0};
	git_buf pktline = GIT_BUF_INIT;
	int error = 0, need_pack = 0;
	push_spec *spec;
	unsigned int i;

	packbuilder_payload.pb = push->pb;

	if (cbs && cbs->push_transfer_progress) {
		packbuilder_payload.cb = cbs->push_transfer_progress;
		packbuilder_payload.cb_payload = cbs->payload;
	}

#ifdef PUSH_DEBUG
{
	git_remote_head *head;
	char hex[GIT_OID_HEXSZ+1]; hex[GIT_OID_HEXSZ] = '\0';

	git_vector_foreach(&push->remote->refs, i, head) {
		git_oid_fmt(hex, &head->oid);
		fprintf(stderr, "%s (%s)\n", hex, head->name);
	}

	git_vector_foreach(&push->specs, i, spec) {
		git_oid_fmt(hex, &spec->roid);
		fprintf(stderr, "%s (%s) -> ", hex, spec->lref);
		git_oid_fmt(hex, &spec->loid);
		fprintf(stderr, "%s (%s)\n", hex, spec->rref ?
			spec->rref : spec->lref);
	}
}
#endif

	/*
	 * Figure out if we need to send a packfile; which is in all
	 * cases except when we only send delete commands
	 */
	git_vector_foreach(&push->specs, i, spec) {
		if (spec->refspec.src && spec->refspec.src[0] != '\0') {
			need_pack = 1;
			break;
		}
	}

	if ((error = git_smart__get_push_stream(t, &packbuilder_payload.stream)) < 0 ||
		(error = gen_pktline(&pktline, push)) < 0 ||
		(error = packbuilder_payload.stream->write(packbuilder_payload.stream, git_buf_cstr(&pktline), git_buf_len(&pktline))) < 0)
		goto done;

	/* stream the packfile through stream_thunk() chunk by chunk */
	if (need_pack &&
		(error = git_packbuilder_foreach(push->pb, &stream_thunk, &packbuilder_payload)) < 0)
		goto done;

	/* If we sent nothing or the server doesn't support report-status, then
	 * we consider the pack to have been unpacked successfully */
	if (!push->specs.length || !push->report_status)
		push->unpack_ok = 1;
	else if ((error = parse_report(t, push)) < 0)
		goto done;

	/* If progress is being reported write the final report */
	if (cbs && cbs->push_transfer_progress) {
		error = cbs->push_transfer_progress(
					push->pb->nr_written,
					push->pb->nr_objects,
					packbuilder_payload.last_bytes,
					cbs->payload);

		if (error < 0)
			goto done;
	}

	if (push->status.length) {
		error = update_refs_from_report(&t->refs, &push->specs, &push->status);
		if (error < 0)
			goto done;

		error = git_smart__update_heads(t, NULL);
	}

done:
	git_buf_free(&pktline);
	return error;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_4808_1 |
crossvul-cpp_data_good_5715_2 | /**
* FreeRDP: A Remote Desktop Protocol Implementation
* Network Transport Layer
*
* Copyright 2011 Vic Lee
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <winpr/crt.h>
#include <winpr/synch.h>
#include <winpr/print.h>
#include <freerdp/error.h>
#include <freerdp/utils/tcp.h>
#include <winpr/stream.h>
#include <time.h>
#include <errno.h>
#include <fcntl.h>
#ifndef _WIN32
#include <netdb.h>
#include <sys/socket.h>
#endif
#include "tpkt.h"
#include "fastpath.h"
#include "transport.h"
#define BUFFER_SIZE 16384
static void* transport_client_thread(void* arg);
/*
 * Take a stream of at least `size` bytes for outgoing data, rewound
 * to position 0. NOTE(review): the stream is drawn from ReceivePool
 * even though it is used for sending — presumably the pool is shared;
 * confirm against the StreamPool usage elsewhere.
 */
wStream* transport_send_stream_init(rdpTransport* transport, int size)
{
	wStream* s;
	s = StreamPool_Take(transport->ReceivePool, size);

	Stream_EnsureCapacity(s, size);
	Stream_SetPosition(s, 0);

	return s;
}
/*
 * Attach an already-connected socket to the transport, using the same
 * TCP context for both input and output (no split in/out channels).
 */
void transport_attach(rdpTransport* transport, int sockfd)
{
	transport->TcpIn->sockfd = sockfd;
	transport->SplitInputOutput = FALSE;
	transport->TcpOut = transport->TcpIn;
}
/*
 * Tear down the active connection: shut down TLS first when it is the
 * current layer, then disconnect either the TSG tunnel or the plain
 * TCP socket. Returns TRUE only if every performed step succeeded.
 */
BOOL transport_disconnect(rdpTransport* transport)
{
	BOOL status = TRUE;

	if (transport->layer == TRANSPORT_LAYER_TLS)
		status &= tls_disconnect(transport->TlsIn);

	if (transport->layer == TRANSPORT_LAYER_TSG)
		tsg_disconnect(transport->tsg);
	else
		status &= tcp_disconnect(transport->TcpIn);

	return status;
}
/*
 * Plain RDP security: no extra transport-level handshake is needed
 * (standard RDP encryption happens at a higher layer), so this is a
 * no-op that always succeeds.
 */
BOOL transport_connect_rdp(rdpTransport* transport)
{
	/* RDP encryption */

	return TRUE;
}
/*
 * Upgrade the client connection to TLS security, creating the TLS
 * context on first use and running the handshake. On handshake
 * failure the context is freed and FALSE is returned (setting
 * connectErrorCode if not already set).
 */
BOOL transport_connect_tls(rdpTransport* transport)
{
	/* TSG connections set up their own TLS pair in transport_tsg_connect() */
	if (transport->layer == TRANSPORT_LAYER_TSG)
		return TRUE;

	if (transport->TlsIn == NULL)
		transport->TlsIn = tls_new(transport->settings);

	/* tls_new() can fail; the NULL context was previously dereferenced below */
	if (transport->TlsIn == NULL)
		return FALSE;

	if (transport->TlsOut == NULL)
		transport->TlsOut = transport->TlsIn;

	transport->layer = TRANSPORT_LAYER_TLS;
	transport->TlsIn->sockfd = transport->TcpIn->sockfd;

	if (tls_connect(transport->TlsIn) != TRUE)
	{
		if (!connectErrorCode)
			connectErrorCode = TLSCONNECTERROR;

		tls_free(transport->TlsIn);

		if (transport->TlsIn == transport->TlsOut)
			transport->TlsIn = transport->TlsOut = NULL;
		else
			transport->TlsIn = NULL;

		return FALSE;
	}

	return TRUE;
}
/*
 * Client-side NLA: establish TLS first, then run the CredSSP
 * authentication exchange when the settings request it. The CredSSP
 * context is freed on both success and failure. Returns FALSE on TLS
 * or authentication failure.
 */
BOOL transport_connect_nla(rdpTransport* transport)
{
	freerdp* instance;
	rdpSettings* settings;

	if (transport->layer == TRANSPORT_LAYER_TSG)
		return TRUE;

	if (!transport_connect_tls(transport))
		return FALSE;

	/* Network Level Authentication */

	if (transport->settings->Authentication != TRUE)
		return TRUE;

	settings = transport->settings;
	instance = (freerdp*) settings->instance;

	if (transport->credssp == NULL)
		transport->credssp = credssp_new(instance, transport, settings);

	if (credssp_authenticate(transport->credssp) < 0)
	{
		if (!connectErrorCode)
			connectErrorCode = AUTHENTICATIONERROR;

		fprintf(stderr, "Authentication failure, check credentials.\n"
			"If credentials are valid, the NTLMSSP implementation may be to blame.\n");

		credssp_free(transport->credssp);
		transport->credssp = NULL;
		return FALSE;
	}

	credssp_free(transport->credssp);
	return TRUE;
}
/*
 * Establish a TS Gateway tunnel: allocate the TSG context, bring up
 * separate TLS sessions on the in/out TCP channels, then connect the
 * gateway to the target host/port. Returns FALSE on any allocation,
 * handshake, or gateway failure.
 */
BOOL transport_tsg_connect(rdpTransport* transport, const char* hostname, UINT16 port)
{
	rdpTsg* tsg = tsg_new(transport);

	/* tsg_new() allocates; a NULL return was previously dereferenced below */
	if (tsg == NULL)
		return FALSE;

	tsg->transport = transport;
	transport->tsg = tsg;
	transport->SplitInputOutput = TRUE;

	if (transport->TlsIn == NULL)
		transport->TlsIn = tls_new(transport->settings);

	if (transport->TlsIn == NULL)
		return FALSE;

	transport->TlsIn->sockfd = transport->TcpIn->sockfd;

	if (transport->TlsOut == NULL)
		transport->TlsOut = tls_new(transport->settings);

	if (transport->TlsOut == NULL)
		return FALSE;

	transport->TlsOut->sockfd = transport->TcpOut->sockfd;

	if (tls_connect(transport->TlsIn) != TRUE)
		return FALSE;

	if (tls_connect(transport->TlsOut) != TRUE)
		return FALSE;

	if (!tsg_connect(tsg, hostname, port))
		return FALSE;

	return TRUE;
}
/*
 * Open the underlying network connection, optionally spawning the
 * async receive thread, and either tunnel through a TS Gateway (two
 * TCP+TLS channels to port 443) or connect directly over TCP.
 * Returns TRUE on success.
 *
 * NOTE(review): tcp_new() / CreateThread() results are not checked
 * for NULL here — confirm whether callers can tolerate that.
 */
BOOL transport_connect(rdpTransport* transport, const char* hostname, UINT16 port)
{
	BOOL status = FALSE;
	rdpSettings* settings = transport->settings;

	transport->async = transport->settings->AsyncTransport;

	if (transport->async)
	{
		transport->stopEvent = CreateEvent(NULL, TRUE, FALSE, NULL);

		transport->thread = CreateThread(NULL, 0,
				(LPTHREAD_START_ROUTINE) transport_client_thread, transport, 0, NULL);
	}

	if (transport->settings->GatewayUsageMethod)
	{
		transport->layer = TRANSPORT_LAYER_TSG;
		transport->TcpOut = tcp_new(settings);

		status = tcp_connect(transport->TcpIn, settings->GatewayHostname, 443);

		if (status)
			status = tcp_connect(transport->TcpOut, settings->GatewayHostname, 443);

		if (status)
			status = transport_tsg_connect(transport, hostname, port);
	}
	else
	{
		status = tcp_connect(transport->TcpIn, hostname, port);

		transport->SplitInputOutput = FALSE;
		transport->TcpOut = transport->TcpIn;
	}

	return status;
}
/*
 * Server-side plain RDP security: nothing to negotiate at the
 * transport layer, so accepting always succeeds.
 */
BOOL transport_accept_rdp(rdpTransport* transport)
{
	/* RDP encryption */

	return TRUE;
}
/*
 * Server side: accept an incoming TLS handshake on the client socket
 * using the configured certificate and private key.
 *
 * NOTE(review): tls_new() may return NULL and is dereferenced below
 * without a check — same pattern as transport_connect_tls; confirm.
 */
BOOL transport_accept_tls(rdpTransport* transport)
{
	if (transport->TlsIn == NULL)
		transport->TlsIn = tls_new(transport->settings);

	if (transport->TlsOut == NULL)
		transport->TlsOut = transport->TlsIn;

	transport->layer = TRANSPORT_LAYER_TLS;
	transport->TlsIn->sockfd = transport->TcpIn->sockfd;

	if (tls_accept(transport->TlsIn, transport->settings->CertificateFile, transport->settings->PrivateKeyFile) != TRUE)
		return FALSE;

	return TRUE;
}
/*
 * Server side: accept TLS, then run the CredSSP (NLA) server exchange
 * when authentication is enabled. On success the credssp context is
 * deliberately kept alive so the caller can read the client's
 * credentials out of it.
 */
BOOL transport_accept_nla(rdpTransport* transport)
{
	freerdp* instance;
	rdpSettings* settings;

	if (transport->TlsIn == NULL)
		transport->TlsIn = tls_new(transport->settings);

	if (transport->TlsOut == NULL)
		transport->TlsOut = transport->TlsIn;

	transport->layer = TRANSPORT_LAYER_TLS;
	transport->TlsIn->sockfd = transport->TcpIn->sockfd;

	if (tls_accept(transport->TlsIn, transport->settings->CertificateFile, transport->settings->PrivateKeyFile) != TRUE)
		return FALSE;

	/* Network Level Authentication */

	if (transport->settings->Authentication != TRUE)
		return TRUE;

	settings = transport->settings;
	instance = (freerdp*) settings->instance;

	if (transport->credssp == NULL)
		transport->credssp = credssp_new(instance, transport, settings);

	if (credssp_authenticate(transport->credssp) < 0)
	{
		fprintf(stderr, "client authentication failure\n");
		credssp_free(transport->credssp);
		transport->credssp = NULL;
		return FALSE;
	}

	/* don't free credssp module yet, we need to copy the credentials from it first */

	return TRUE;
}
/*
 * Check whether the bytes at the stream's read position look like a
 * CredSSP TSRequest: a DER SEQUENCE tag (0x30) followed by a length
 * octet with the high bit set. Assumes at least two readable bytes.
 */
BOOL nla_verify_header(wStream* s)
{
	if (s->pointer[0] != 0x30)
		return FALSE;

	return (s->pointer[1] & 0x80) ? TRUE : FALSE;
}
/*
 * Decode the DER length field of a TSRequest at the stream position,
 * returning the total message length (header included) and seeking
 * past the header. Handles short form and 1- or 2-byte long form;
 * longer forms return 0 after printing an error. Assumes the header
 * bytes are present in the stream — lengths are not bounds-checked
 * here.
 */
UINT32 nla_read_header(wStream* s)
{
	UINT32 length = 0;

	if (s->pointer[1] & 0x80)
	{
		/* long form: low bits give the number of length octets */
		if ((s->pointer[1] & ~(0x80)) == 1)
		{
			length = s->pointer[2];
			length += 3;
			Stream_Seek(s, 3);
		}
		else if ((s->pointer[1] & ~(0x80)) == 2)
		{
			length = (s->pointer[2] << 8) | s->pointer[3];
			length += 4;
			Stream_Seek(s, 4);
		}
		else
		{
			fprintf(stderr, "Error reading TSRequest!\n");
		}
	}
	else
	{
		/* short form: single length octet */
		length = s->pointer[1];
		length += 2;
		Stream_Seek(s, 2);
	}

	return length;
}
/*
 * Return the size in bytes of a TSRequest's tag+length header (2, 3,
 * or 4) without consuming stream data. Unsupported long-form lengths
 * print an error and yield 0. Assumes the header bytes are readable.
 */
UINT32 nla_header_length(wStream* s)
{
	if (!(s->pointer[1] & 0x80))
		return 2; /* short form: tag + one length octet */

	switch (s->pointer[1] & ~(0x80))
	{
		case 1:
			return 3; /* tag + 0x81 + 1 length octet */

		case 2:
			return 4; /* tag + 0x82 + 2 length octets */

		default:
			fprintf(stderr, "Error reading TSRequest!\n");
			return 0;
	}
}
/*
 * Read up to `bytes` bytes from the active layer (TLS, TCP, or TSG)
 * into `data`. In blocking mode, loops (sleeping on empty reads)
 * until all bytes arrive and returns the count; in non-blocking mode
 * returns the first layer read's result. Negative on error.
 */
int transport_read_layer(rdpTransport* transport, UINT8* data, int bytes)
{
	int read = 0;
	int status = -1;

	while (read < bytes)
	{
		if (transport->layer == TRANSPORT_LAYER_TLS)
			status = tls_read(transport->TlsIn, data + read, bytes - read);
		else if (transport->layer == TRANSPORT_LAYER_TCP)
			status = tcp_read(transport->TcpIn, data + read, bytes - read);
		else if (transport->layer == TRANSPORT_LAYER_TSG)
			status = tsg_read(transport->tsg, data + read, bytes - read);

		/* blocking means that we can't continue until this is read */

		if (!transport->blocking)
			return status;

		if (status < 0)
			return status;

		read += status;

		if (status == 0)
		{
			/*
			 * instead of sleeping, we should wait timeout on the
			 * socket but this only happens on initial connection
			 */
			USleep(transport->SleepInterval);
		}
	}

	return read;
}
/*
 * Read exactly one PDU into stream `s`: first complete a 4-byte
 * header, then derive the total PDU length from the header format
 * (TPKT, CredSSP TSRequest, or Fast-Path) and read the remainder.
 * Returns the number of bytes read this call, or negative on error
 * (including a malformed length field).
 */
int transport_read(rdpTransport* transport, wStream* s)
{
	int status;
	int pduLength;
	int streamPosition;
	int transport_status;

	pduLength = 0;
	transport_status = 0;

	/* first check if we have header */
	streamPosition = Stream_GetPosition(s);

	if (streamPosition < 4)
	{
		status = transport_read_layer(transport, Stream_Buffer(s) + streamPosition, 4 - streamPosition);

		if (status < 0)
			return status;

		transport_status += status;

		if ((status + streamPosition) < 4)
			return transport_status;

		streamPosition += status;
	}

	/* if header is present, read in exactly one PDU */
	if (s->buffer[0] == 0x03)
	{
		/* TPKT header: big-endian length in bytes 2-3 */
		pduLength = (s->buffer[2] << 8) | s->buffer[3];
	}
	else if (s->buffer[0] == 0x30)
	{
		/* TSRequest (NLA): DER SEQUENCE, short or long-form length */
		if (s->buffer[1] & 0x80)
		{
			if ((s->buffer[1] & ~(0x80)) == 1)
			{
				pduLength = s->buffer[2];
				pduLength += 3;
			}
			else if ((s->buffer[1] & ~(0x80)) == 2)
			{
				pduLength = (s->buffer[2] << 8) | s->buffer[3];
				pduLength += 4;
			}
			else
			{
				fprintf(stderr, "Error reading TSRequest!\n");
			}
		}
		else
		{
			pduLength = s->buffer[1];
			pduLength += 2;
		}
	}
	else
	{
		/* Fast-Path Header */
		if (s->buffer[1] & 0x80)
			pduLength = ((s->buffer[1] & 0x7F) << 8) | s->buffer[2];
		else
			pduLength = s->buffer[1];
	}

	/*
	 * pduLength comes straight off the wire and was previously used
	 * without validation: a value below the header bytes already
	 * consumed made the remaining read length negative, and an
	 * oversized value could overflow the receive stream. Reject
	 * malformed lengths and grow the stream before reading the body.
	 */
	if (pduLength < streamPosition || pduLength > 0xFFFF)
	{
		fprintf(stderr, "transport_read: invalid PDU length: %d\n", pduLength);
		return -1;
	}

	Stream_EnsureCapacity(s, pduLength);

	status = transport_read_layer(transport, Stream_Buffer(s) + streamPosition, pduLength - streamPosition);

	if (status < 0)
		return status;

	transport_status += status;

#ifdef WITH_DEBUG_TRANSPORT
	/* dump when whole PDU is read */
	if (streamPosition + status >= pduLength)
	{
		fprintf(stderr, "Local < Remote\n");
		winpr_HexDump(Stream_Buffer(s), pduLength);
	}
#endif

	return transport_status;
}
/* Pull whatever is currently available into the transport's receive
 * buffer and advance the buffer's write position by the amount read.
 * Returns the transport_read() status (bytes read, 0, or negative). */
static int transport_read_nonblocking(rdpTransport* transport)
{
	int status = transport_read(transport, transport->ReceiveBuffer);

	if (status > 0)
		Stream_Seek(transport->ReceiveBuffer, status);
	return status;
}
/* Send the whole content of stream 's' (everything up to its current
 * position) over the active layer, serialized by WriteMutex.
 * Zero-byte writes are handled by draining the receive side (in
 * non-blocking mode) and waiting for writability.
 * On a write error the transport is marked CLOSED. A pooled stream is
 * released here, i.e. ownership of 's' transfers to this function.
 * Returns the status of the last write (>= 0 on success, < 0 on error). */
int transport_write(rdpTransport* transport, wStream* s)
{
int length;
int status = -1;
WaitForSingleObject(transport->WriteMutex, INFINITE);
length = Stream_GetPosition(s);
Stream_SetPosition(s, 0);
#ifdef WITH_DEBUG_TRANSPORT
if (length > 0)
{
fprintf(stderr, "Local > Remote\n");
winpr_HexDump(Stream_Buffer(s), length);
}
#endif
while (length > 0)
{
if (transport->layer == TRANSPORT_LAYER_TLS)
status = tls_write(transport->TlsOut, Stream_Pointer(s), length);
else if (transport->layer == TRANSPORT_LAYER_TCP)
status = tcp_write(transport->TcpOut, Stream_Pointer(s), length);
else if (transport->layer == TRANSPORT_LAYER_TSG)
status = tsg_write(transport->tsg, Stream_Pointer(s), length);
if (status < 0)
break; /* error occurred */
if (status == 0)
{
/* when sending is blocked in nonblocking mode, the receiving buffer should be checked */
if (!transport->blocking)
{
/* and in case we do have buffered some data, we set the event so next loop will get it */
if (transport_read_nonblocking(transport) > 0)
SetEvent(transport->ReceiveEvent);
}
if (transport->layer == TRANSPORT_LAYER_TLS)
tls_wait_write(transport->TlsOut);
else if (transport->layer == TRANSPORT_LAYER_TCP)
tcp_wait_write(transport->TcpOut);
else
USleep(transport->SleepInterval);
}
length -= status;
Stream_Seek(s, status);
}
if (status < 0)
{
/* A write error indicates that the peer has dropped the connection */
transport->layer = TRANSPORT_LAYER_CLOSED;
}
if (s->pool)
Stream_Release(s);
ReleaseMutex(transport->WriteMutex);
return status;
}
/* Append the transport's pollable descriptors/handles to 'rfds',
 * advancing *rcount for each one added: the input TCP socket, the
 * output socket when input/output are split, the receive event, and
 * the gateway event if present.
 * NOTE(review): no bounds check against the caller's array size --
 * callers must provide room for up to 4 entries. */
void transport_get_fds(rdpTransport* transport, void** rfds, int* rcount)
{
void* pfd;
#ifdef _WIN32
rfds[*rcount] = transport->TcpIn->wsa_event;
(*rcount)++;
if (transport->SplitInputOutput)
{
rfds[*rcount] = transport->TcpOut->wsa_event;
(*rcount)++;
}
#else
rfds[*rcount] = (void*)(long)(transport->TcpIn->sockfd);
(*rcount)++;
if (transport->SplitInputOutput)
{
rfds[*rcount] = (void*)(long)(transport->TcpOut->sockfd);
(*rcount)++;
}
#endif
pfd = GetEventWaitObject(transport->ReceiveEvent);
if (pfd)
{
rfds[*rcount] = pfd;
(*rcount)++;
}
if (transport->GatewayEvent)
{
pfd = GetEventWaitObject(transport->GatewayEvent);
if (pfd)
{
rfds[*rcount] = pfd;
(*rcount)++;
}
}
}
/* Append the transport's waitable read handles to 'events', starting
 * at index *count and updating *count: input TCP event, output TCP
 * event when input/output are split, then the receive and gateway
 * events if set. Callers must size 'events' for up to 4 new entries. */
void transport_get_read_handles(rdpTransport* transport, HANDLE* events, DWORD* count)
{
	DWORD n = *count;

	events[n++] = tcp_get_event_handle(transport->TcpIn);
	if (transport->SplitInputOutput)
		events[n++] = tcp_get_event_handle(transport->TcpOut);
	if (transport->ReceiveEvent)
		events[n++] = transport->ReceiveEvent;
	if (transport->GatewayEvent)
		events[n++] = transport->GatewayEvent;
	*count = n;
}
/* Drain the receive buffer: read pending data, then dispatch every
 * complete PDU to the ReceiveCallback. Each framing (TPKT, TSRequest,
 * fast-path) is verified to have enough bytes for its header before
 * the header is parsed, and the position is restored whenever a PDU
 * is still incomplete so the next call resumes cleanly.
 * Takes rdpTransport** because the callback (e.g. during redirection)
 * may free the transport and install a new one.
 * Returns 0 on success/partial data, negative on error. */
int transport_check_fds(rdpTransport** ptransport)
{
int pos;
int status;
UINT16 length;
int recv_status;
wStream* received;
rdpTransport* transport = *ptransport;
#ifdef _WIN32
WSAResetEvent(transport->TcpIn->wsa_event);
#endif
ResetEvent(transport->ReceiveEvent);
status = transport_read_nonblocking(transport);
if (status < 0)
return status;
/* pos = bytes buffered so far; loop once per complete PDU */
while ((pos = Stream_GetPosition(transport->ReceiveBuffer)) > 0)
{
Stream_SetPosition(transport->ReceiveBuffer, 0);
if (tpkt_verify_header(transport->ReceiveBuffer)) /* TPKT */
{
/* Ensure the TPKT header is available. */
if (pos <= 4)
{
Stream_SetPosition(transport->ReceiveBuffer, pos);
return 0;
}
length = tpkt_read_header(transport->ReceiveBuffer);
}
else if (nla_verify_header(transport->ReceiveBuffer))
{
/* TSRequest */
/* Ensure the TSRequest header is available. */
if (pos <= 4)
{
Stream_SetPosition(transport->ReceiveBuffer, pos);
return 0;
}
/* TSRequest header can be 2, 3 or 4 bytes long */
length = nla_header_length(transport->ReceiveBuffer);
if (pos < length)
{
Stream_SetPosition(transport->ReceiveBuffer, pos);
return 0;
}
length = nla_read_header(transport->ReceiveBuffer);
}
else /* Fast Path */
{
/* Ensure the Fast Path header is available. */
if (pos <= 2)
{
Stream_SetPosition(transport->ReceiveBuffer, pos);
return 0;
}
/* Fastpath header can be two or three bytes long. */
length = fastpath_header_length(transport->ReceiveBuffer);
if (pos < length)
{
Stream_SetPosition(transport->ReceiveBuffer, pos);
return 0;
}
length = fastpath_read_header(NULL, transport->ReceiveBuffer);
}
if (length == 0)
{
fprintf(stderr, "transport_check_fds: protocol error, not a TPKT or Fast Path header.\n");
winpr_HexDump(Stream_Buffer(transport->ReceiveBuffer), pos);
return -1;
}
if (pos < length)
{
Stream_SetPosition(transport->ReceiveBuffer, pos);
return 0; /* Packet is not yet completely received. */
}
/* hand the complete PDU over; swap in a fresh buffer first so the
 * callback may keep/release 'received' independently */
received = transport->ReceiveBuffer;
transport->ReceiveBuffer = StreamPool_Take(transport->ReceivePool, 0);
Stream_SetPosition(received, length);
Stream_SealLength(received);
Stream_SetPosition(received, 0);
recv_status = transport->ReceiveCallback(transport, received, transport->ReceiveExtra);
Stream_Release(received);
if (recv_status < 0)
status = -1;
if (status < 0)
return status;
/* transport might now have been freed by rdp_client_redirect and a new rdp->transport created */
transport = *ptransport;
}
return 0;
}
/* Switch the transport's sockets between blocking and non-blocking
 * mode and record the mode on the transport.
 * Returns TRUE only if every socket switch succeeded.
 * Fix: the previous if/else duplicated the TcpIn call in both
 * branches; TcpIn is always switched, TcpOut only when input and
 * output are split. */
BOOL transport_set_blocking_mode(rdpTransport* transport, BOOL blocking)
{
	BOOL status = TRUE;

	transport->blocking = blocking;
	status &= tcp_set_blocking_mode(transport->TcpIn, blocking);
	if (transport->SplitInputOutput)
		status &= tcp_set_blocking_mode(transport->TcpOut, blocking);
	if (transport->layer == TRANSPORT_LAYER_TSG)
	{
		tsg_set_blocking_mode(transport->tsg, blocking);
	}
	return status;
}
/* Client transport thread: waits for the connection to be established,
 * then loops waiting on the transport's read handles and pumping
 * freerdp_check_fds() until stopEvent is signalled or a check fails.
 * Index layout of 'events': slot 0 always holds stopEvent; the first
 * wait adds connectedEvent at slot 1 (nCount stays 1, hence the
 * nCount + 1 wait count); transport_get_read_handles then overwrites
 * from slot 1 onward for the main wait. */
static void* transport_client_thread(void* arg)
{
DWORD status;
DWORD nCount;
HANDLE events[32];
freerdp* instance;
rdpContext* context;
rdpTransport* transport;
TerminateEventArgs e;
transport = (rdpTransport*) arg;
instance = (freerdp*) transport->settings->instance;
/* NOTE(review): 'context', 'status' and 'e' are set/declared but never
 * used below -- presumably leftovers; confirm before removing. */
context = instance->context;
while (1)
{
nCount = 0;
events[nCount++] = transport->stopEvent;
events[nCount] = transport->connectedEvent;
status = WaitForMultipleObjects(nCount + 1, events, FALSE, INFINITE);
if (WaitForSingleObject(transport->stopEvent, 0) == WAIT_OBJECT_0)
{
break;
}
/* '(HANDLE*) &events' and 'events' are the same address here */
transport_get_read_handles(transport, (HANDLE*) &events, &nCount);
status = WaitForMultipleObjects(nCount, events, FALSE, INFINITE);
if (WaitForSingleObject(transport->stopEvent, 0) == WAIT_OBJECT_0)
{
break;
}
if (!freerdp_check_fds(instance))
break;
}
return NULL;
}
/* Allocate and initialize an rdpTransport bound to 'settings': creates
 * the input TCP layer, the receive stream pool/buffer, events and
 * mutexes, starting in blocking TCP mode.
 * Returns the new transport, or NULL if the top-level malloc fails.
 * NOTE(review): the results of tcp_new/StreamPool_New/CreateEvent/
 * CreateMutex are not NULL-checked -- a partial failure here yields a
 * transport with NULL members; confirm callers tolerate this. */
rdpTransport* transport_new(rdpSettings* settings)
{
rdpTransport* transport;
transport = (rdpTransport*) malloc(sizeof(rdpTransport));
if (transport != NULL)
{
ZeroMemory(transport, sizeof(rdpTransport));
transport->TcpIn = tcp_new(settings);
transport->settings = settings;
/* a small 0.1ms delay when transport is blocking. */
transport->SleepInterval = 100;
transport->ReceivePool = StreamPool_New(TRUE, BUFFER_SIZE);
/* receive buffer for non-blocking read. */
transport->ReceiveBuffer = StreamPool_Take(transport->ReceivePool, 0);
transport->ReceiveEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
transport->connectedEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
transport->blocking = TRUE;
transport->ReadMutex = CreateMutex(NULL, FALSE, NULL);
transport->WriteMutex = CreateMutex(NULL, FALSE, NULL);
transport->layer = TRANSPORT_LAYER_TCP;
}
return transport;
}
/* Tear down a transport: signal the client thread to stop, release the
 * receive buffer/pool, close events, free TLS/TCP/TSG layers (guarding
 * against shared In/Out instances), close mutexes and free the struct.
 * Safe to call with NULL.
 * NOTE(review): stopEvent is signalled but neither waited on nor
 * closed here, and the client thread is not joined -- confirm that
 * ownership/shutdown is handled by the caller. */
void transport_free(rdpTransport* transport)
{
if (transport != NULL)
{
SetEvent(transport->stopEvent);
if (transport->ReceiveBuffer)
Stream_Release(transport->ReceiveBuffer);
StreamPool_Free(transport->ReceivePool);
CloseHandle(transport->ReceiveEvent);
CloseHandle(transport->connectedEvent);
if (transport->TlsIn)
tls_free(transport->TlsIn);
/* In and Out may alias the same object; free Out only when distinct */
if (transport->TlsOut != transport->TlsIn)
tls_free(transport->TlsOut);
tcp_free(transport->TcpIn);
if (transport->TcpOut != transport->TcpIn)
tcp_free(transport->TcpOut);
tsg_free(transport->tsg);
CloseHandle(transport->ReadMutex);
CloseHandle(transport->WriteMutex);
free(transport);
}
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_5715_2 |
crossvul-cpp_data_good_864_0 | /* $Id: pcpserver.c,v 1.47 2018/03/13 10:21:19 nanard Exp $ */
/* MiniUPnP project
* Website : http://miniupnp.free.fr/
* Author : Peter Tatrai
Copyright (c) 2013 by Cisco Systems, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* Current assumptions:
- IPv4 is always NATted (internal -> external)
- IPv6 is always firewalled (this may need some work, NAT6* do exist)
- we make the judgement based on (in order, picking first one available):
- third party address
- internal client address
TODO : handle NAT46, NAT64, NPT66. In addition, beyond FW/NAT
choice, should also add for proxy (=as much as possible transparent
pass-through to one or more servers).
TODO: IPv6 permission handling (for the time being, we just assume
anyone on IPv6 is a good guy, but fixing that would include
upnppermissions rewrite to be AF neutral).
*/
#include "config.h"
#ifdef ENABLE_PCP
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <time.h>
#include <signal.h>
#include <stdio.h>
#include <ctype.h>
#include <syslog.h>
#include "pcpserver.h"
#include "natpmp.h"
#include "macros.h"
#include "upnpglobalvars.h"
#include "pcplearndscp.h"
#include "upnpredirect.h"
#include "commonrdr.h"
#include "getifaddr.h"
#include "asyncsendto.h"
#include "upnputils.h"
#include "portinuse.h"
#include "pcp_msg_struct.h"
#ifdef ENABLE_UPNPPINHOLE
#include "upnppinhole.h"
#endif /* ENABLE_UPNPPINHOLE */
#ifdef PCP_PEER
/* TODO make this platform independent */
#ifdef USE_NETFILTER
#include "netfilter/iptcrdr.h"
#else
#error "PCP Peer is only supported with NETFILTER"
#endif /* USE_NETFILTER */
#endif /* PCP_PEER */
/* server specific information */
struct pcp_server_info {
uint8_t server_version;
};
/* default server settings, highest version supported is the default */
static const struct pcp_server_info this_server_info = {2};
/* structure holding information from PCP msg*/
/* all variables are in host byte order except IP addresses */
/* Pointer members reference into the received request buffer and are
 * only valid while that buffer lives. */
typedef struct pcp_info {
uint8_t version;
uint8_t opcode;
uint8_t result_code;
uint32_t lifetime; /* lifetime of the mapping */
uint32_t epochtime;
/* both MAP and PEER opcode specific information */
uint32_t nonce[3]; /* random value generated by client */
uint8_t protocol;
uint16_t int_port;
const struct in6_addr *int_ip; /* in network order */
uint16_t ext_port;
const struct in6_addr *ext_ip; /* Suggested external IP in network order*/
/* PEER specific information */
#ifdef PCP_PEER
uint16_t peer_port;
const struct in6_addr *peer_ip; /* Destination IP in network order */
#endif /* PCP_PEER */
#ifdef PCP_SADSCP
/* SADSCP specific information */
uint8_t delay_tolerance;
uint8_t loss_tolerance;
uint8_t jitter_tolerance;
uint8_t app_name_len;
/* NOT NUL-terminated: points into the raw request, length above */
const char* app_name;
uint8_t sadscp_dscp;
uint8_t matched_name;
int8_t is_sadscp_op;
#endif
#ifdef PCP_FLOWP
uint8_t dscp_up;
uint8_t dscp_down;
int flowp_present;
#endif
uint8_t is_map_op;
uint8_t is_peer_op;
const struct in6_addr *thirdp_ip;
const struct in6_addr *mapped_ip;
char mapped_str[INET6_ADDRSTRLEN];
int pfailure_present;
struct in6_addr sender_ip;
int is_fw; /* is this firewall operation? if not, nat. */
char desc[64];
} pcp_info_t;
/* getPCPOpCodeStr()
* return a string representation of the PCP OpCode
* can be used for debug output */
static const char * getPCPOpCodeStr(uint8_t opcode)
{
switch(opcode) {
case PCP_OPCODE_ANNOUNCE:
return "ANNOUNCE";
case PCP_OPCODE_MAP:
return "MAP";
case PCP_OPCODE_PEER:
return "PEER";
#ifdef PCP_SADSCP
case PCP_OPCODE_SADSCP:
return "SADSCP";
#endif /* PCP_SADSCP */
default:
return "UNKNOWN";
}
}
/* useful to copy ext_ip only if needed, as request and response
* buffers are same */
static void copyIPv6IfDifferent(void * dest, const void * src)
{
if(dest != src && src != NULL) {
memcpy(dest, src, sizeof(struct in6_addr));
}
}
#ifdef PCP_SADSCP
int get_dscp_value(pcp_info_t *pcp_msg_info) {
unsigned int ind;
for (ind = 0; ind < num_dscp_values; ind++) {
if ((dscp_values_list[ind].app_name) &&
(!strcmp(dscp_values_list[ind].app_name,
pcp_msg_info->app_name)) &&
(pcp_msg_info->delay_tolerance == dscp_values_list[ind].delay) &&
(pcp_msg_info->loss_tolerance == dscp_values_list[ind].loss) &&
(pcp_msg_info->jitter_tolerance == dscp_values_list[ind].jitter)
)
{
pcp_msg_info->sadscp_dscp = dscp_values_list[ind].dscp_value;
pcp_msg_info->matched_name = 1;
return 0;
} else
if ((pcp_msg_info->app_name_len==0) &&
(dscp_values_list[ind].app_name_len==0) &&
(pcp_msg_info->delay_tolerance == dscp_values_list[ind].delay) &&
(pcp_msg_info->loss_tolerance == dscp_values_list[ind].loss) &&
(pcp_msg_info->jitter_tolerance == dscp_values_list[ind].jitter)
)
{
pcp_msg_info->sadscp_dscp = dscp_values_list[ind].dscp_value;
pcp_msg_info->matched_name = 0;
return 0;
} else
if ((dscp_values_list[ind].app_name_len==0) &&
(pcp_msg_info->delay_tolerance == dscp_values_list[ind].delay) &&
(pcp_msg_info->loss_tolerance == dscp_values_list[ind].loss) &&
(pcp_msg_info->jitter_tolerance == dscp_values_list[ind].jitter)
)
{
pcp_msg_info->sadscp_dscp = dscp_values_list[ind].dscp_value;
pcp_msg_info->matched_name = 0;
return 0;
}
}
//if nothing matched return Default value i.e. 0
pcp_msg_info->sadscp_dscp = 0;
pcp_msg_info->matched_name = 0;
return 0;
}
#endif
/*
* Function extracting information from common_req (common request header)
* into pcp_msg_info.
* @return : when no problem occurred 0 is returned, 1 otherwise and appropriate
* result code is assigned to pcp_msg_info->result_code to indicate
* what kind of error occurred
*/
/* Extract the PCP common request header into pcp_msg_info.
 * Layout: version(1) R|opcode(1) reserved(2) lifetime(4) client_ip(16).
 * Returns 0 on success; 1 with result_code set when the client's
 * version exceeds what this server supports. The requested lifetime is
 * clamped into [min_lifetime, max_lifetime], except 0 which is kept
 * as-is (it requests deletion). */
static int parseCommonRequestHeader(const uint8_t *common_req, pcp_info_t *pcp_msg_info)
{
	pcp_msg_info->version = common_req[0];
	pcp_msg_info->opcode = common_req[1] & 0x7f; /* strip the R (response) bit */
	pcp_msg_info->lifetime = READNU32(common_req + 4);
	pcp_msg_info->int_ip = (struct in6_addr *)(common_req + 8);
	pcp_msg_info->mapped_ip = (struct in6_addr *)(common_req + 8);

	if (pcp_msg_info->version > this_server_info.server_version) {
		pcp_msg_info->result_code = PCP_ERR_UNSUPP_VERSION;
		return 1;
	}
	if (pcp_msg_info->lifetime > max_lifetime) {
		pcp_msg_info->lifetime = max_lifetime;
	}
	if ((pcp_msg_info->lifetime != 0) && (pcp_msg_info->lifetime < min_lifetime)) {
		pcp_msg_info->lifetime = min_lifetime;
	}
	return 0;
}
#ifdef DEBUG
/* Debug helper: dump the fields of a v1 MAP opcode payload to syslog.
 * 'buf' points just past the common request header. */
static void printMAPOpcodeVersion1(const uint8_t *buf)
{
char map_addr[INET6_ADDRSTRLEN];
syslog(LOG_DEBUG, "PCP MAP: v1 Opcode specific information. \n");
syslog(LOG_DEBUG, "MAP protocol: \t\t %d\n", (int)buf[0] );
syslog(LOG_DEBUG, "MAP int port: \t\t %d\n", (int)READNU16(buf+4));
syslog(LOG_DEBUG, "MAP ext port: \t\t %d\n", (int)READNU16(buf+6));
syslog(LOG_DEBUG, "MAP Ext IP: \t\t %s\n", inet_ntop(AF_INET6,
buf+8, map_addr, INET6_ADDRSTRLEN));
}
/* Debug helper: dump the fields of a v2 MAP opcode payload to syslog.
 * 'buf' points just past the common request header. */
static void printMAPOpcodeVersion2(const uint8_t *buf)
{
char map_addr[INET6_ADDRSTRLEN];
syslog(LOG_DEBUG, "PCP MAP: v2 Opcode specific information.");
syslog(LOG_DEBUG, "MAP nonce: \t%08x%08x%08x",
READNU32(buf), READNU32(buf+4), READNU32(buf+8));
syslog(LOG_DEBUG, "MAP protocol:\t%d", (int)buf[12]);
syslog(LOG_DEBUG, "MAP int port:\t%d", (int)READNU16(buf+16));
syslog(LOG_DEBUG, "MAP ext port:\t%d", (int)READNU16(buf+18));
syslog(LOG_DEBUG, "MAP Ext IP: \t%s", inet_ntop(AF_INET6,
buf+20, map_addr, INET6_ADDRSTRLEN));
}
#endif /* DEBUG */
/* Extract a v1 MAP opcode payload into pcp_msg_info.
 * Layout: proto(1) pad(3) int_port(2) ext_port(2) ext_ip(16);
 * ext_ip remains a pointer into the request buffer. */
static void parsePCPMAP_version1(const uint8_t *map_v1,
		pcp_info_t *pcp_msg_info)
{
	const uint8_t *p = map_v1;

	pcp_msg_info->is_map_op = 1;
	pcp_msg_info->protocol = p[0];
	pcp_msg_info->int_port = READNU16(p + 4);
	pcp_msg_info->ext_port = READNU16(p + 6);
	pcp_msg_info->ext_ip = (struct in6_addr *)(p + 8);
}
/* Extract a v2 MAP opcode payload into pcp_msg_info.
 * Layout: nonce(12) proto(1) pad(3) int_port(2) ext_port(2) ext_ip(16);
 * the nonce is copied, ext_ip stays a pointer into the request. */
static void parsePCPMAP_version2(const uint8_t *map_v2,
		pcp_info_t *pcp_msg_info)
{
	const uint8_t *p = map_v2;

	pcp_msg_info->is_map_op = 1;
	memcpy(pcp_msg_info->nonce, p, 12);
	pcp_msg_info->protocol = p[12];
	pcp_msg_info->int_port = READNU16(p + 16);
	pcp_msg_info->ext_port = READNU16(p + 18);
	pcp_msg_info->ext_ip = (struct in6_addr *)(p + 20);
}
#ifdef PCP_PEER
#ifdef DEBUG
/* Debug helper: dump the fields of a v1 PEER opcode payload to syslog.
 * 'buf' points just past the common request header. */
static void printPEEROpcodeVersion1(const uint8_t *buf)
{
char ext_addr[INET6_ADDRSTRLEN];
char peer_addr[INET6_ADDRSTRLEN];
syslog(LOG_DEBUG, "PCP PEER: v1 Opcode specific information. \n");
syslog(LOG_DEBUG, "Protocol: \t\t %d\n", (int)buf[0]);
syslog(LOG_DEBUG, "Internal port: \t\t %d\n", READNU16(buf + 4));
syslog(LOG_DEBUG, "External IP: \t\t %s\n", inet_ntop(AF_INET6, buf + 8,
ext_addr,INET6_ADDRSTRLEN));
syslog(LOG_DEBUG, "External port port: \t\t %d\n", READNU16(buf + 6));
syslog(LOG_DEBUG, "PEER IP: \t\t %s\n", inet_ntop(AF_INET6, buf + 28,
peer_addr,INET6_ADDRSTRLEN));
syslog(LOG_DEBUG, "PEER port port: \t\t %d\n", READNU16(buf + 24));
}
/* Debug helper: dump the fields of a v2 PEER opcode payload to syslog.
 * 'buf' points just past the common request header. */
static void printPEEROpcodeVersion2(const uint8_t *buf)
{
char ext_addr[INET6_ADDRSTRLEN];
char peer_addr[INET6_ADDRSTRLEN];
syslog(LOG_DEBUG, "PCP PEER: v2 Opcode specific information.");
syslog(LOG_DEBUG, "nonce: \t%08x%08x%08x",
READNU32(buf), READNU32(buf+4), READNU32(buf+8));
syslog(LOG_DEBUG, "Protocol: \t%d", buf[12]);
syslog(LOG_DEBUG, "Internal port:\t%d", READNU16(buf + 16));
syslog(LOG_DEBUG, "External IP: \t%s", inet_ntop(AF_INET6, buf + 20,
ext_addr, INET6_ADDRSTRLEN));
syslog(LOG_DEBUG, "External port:\t%d", READNU16(buf + 18));
syslog(LOG_DEBUG, "PEER IP: \t%s", inet_ntop(AF_INET6, buf + 40,
peer_addr, INET6_ADDRSTRLEN));
syslog(LOG_DEBUG, "PEER port: \t%d", READNU16(buf + 36));
}
#endif /* DEBUG */
/*
* Function extracting information from peer_buf to pcp_msg_info
* @return : when no problem occurred 0 is returned, 1 otherwise
*/
/* Extract a v1 PEER opcode payload into pcp_msg_info.
 * Layout: proto(1) pad(3) int_port(2) ext_port(2) ext_ip(16)
 *         peer_port(2) pad(2) peer_ip(16);
 * both IP fields remain pointers into the request buffer. */
static void parsePCPPEER_version1(const uint8_t *buf,
		pcp_info_t *pcp_msg_info)
{
	const uint8_t *p = buf;

	pcp_msg_info->is_peer_op = 1;
	pcp_msg_info->protocol = p[0];
	pcp_msg_info->int_port = READNU16(p + 4);
	pcp_msg_info->ext_port = READNU16(p + 6);
	pcp_msg_info->ext_ip = (struct in6_addr *)(p + 8);
	pcp_msg_info->peer_port = READNU16(p + 24);
	pcp_msg_info->peer_ip = (struct in6_addr *)(p + 28);
}
/*
* Function extracting information from peer_buf to pcp_msg_info
* @return : when no problem occurred 0 is returned, 1 otherwise
*/
/* Extract a v2 PEER opcode payload into pcp_msg_info.
 * Layout: nonce(12) proto(1) pad(3) int_port(2) ext_port(2)
 *         ext_ip(16) peer_port(2) pad(2) peer_ip(16);
 * the nonce is copied, IP fields stay pointers into the request. */
static void parsePCPPEER_version2(const uint8_t *buf, pcp_info_t *pcp_msg_info)
{
	const uint8_t *p = buf;

	pcp_msg_info->is_peer_op = 1;
	memcpy(pcp_msg_info->nonce, p, 12);
	pcp_msg_info->protocol = p[12];
	pcp_msg_info->int_port = READNU16(p + 16);
	pcp_msg_info->ext_port = READNU16(p + 18);
	pcp_msg_info->ext_ip = (struct in6_addr *)(p + 20);
	pcp_msg_info->peer_port = READNU16(p + 36);
	pcp_msg_info->peer_ip = (struct in6_addr *)(p + 40);
}
#endif /* PCP_PEER */
#ifdef PCP_SADSCP
#ifdef DEBUG
/* Debug helper: dump a SADSCP opcode payload to syslog.
 * Byte 12 packs the tolerances: delay(2b) loss(2b) jitter(2b) RRR(2b);
 * byte 13 is the app-name length, the (non NUL-terminated) name
 * follows at byte 14. */
static void printSADSCPOpcode(const uint8_t *buf)
{
unsigned char sadscp_tol;
sadscp_tol = buf[12]; /* tolerance_fields */
syslog(LOG_DEBUG, "PCP SADSCP: Opcode specific information.\n");
syslog(LOG_DEBUG, "Delay tolerance %d \n", (sadscp_tol>>6)&3);
syslog(LOG_DEBUG, "Loss tolerance %d \n", (sadscp_tol>>4)&3);
syslog(LOG_DEBUG, "Jitter tolerance %d \n", (sadscp_tol>>2)&3);
syslog(LOG_DEBUG, "RRR %d \n", sadscp_tol&3);
syslog(LOG_DEBUG, "AppName Length %d \n", buf[13]);
syslog(LOG_DEBUG, "Application name %.*s \n", buf[13], buf + 14);
}
#endif //DEBUG
/* Extract a SADSCP opcode payload into pcp_msg_info.
 * Byte 12 packs the tolerances (2 bits each: delay, loss, jitter, RRR);
 * the value 3 is reserved for each field and rejected.
 * Returns 0 on success, 1 with result_code set on a malformed request.
 * The stored app_name points into 'buf' and is NOT NUL-terminated;
 * its length is kept in app_name_len. */
static int parseSADSCP(const uint8_t *buf, pcp_info_t *pcp_msg_info)
{
	uint8_t tol = buf[12];

	pcp_msg_info->delay_tolerance = (tol >> 6) & 3;
	pcp_msg_info->loss_tolerance = (tol >> 4) & 3;
	pcp_msg_info->jitter_tolerance = (tol >> 2) & 3;
	if ((pcp_msg_info->delay_tolerance == 3) ||
	    (pcp_msg_info->loss_tolerance == 3) ||
	    (pcp_msg_info->jitter_tolerance == 3)) {
		pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
		return 1;
	}
	pcp_msg_info->app_name_len = buf[13];
	pcp_msg_info->app_name = (const char *)(buf + 14);
	return 0;
}
#endif /* PCP_SADSCP */
/* Validate and consume a single PCP option at 'pcp_buf'.
 * 'remain' is the number of bytes left in the request; the option
 * header (code, reserved, 16-bit payload length) is checked against it
 * before any field is read. Recognized options (THIRD_PARTY,
 * PREFER_FAILURE, FILTER, FLOW_PRIORITY) are recorded in pcp_msg_info.
 * Returns the total option size (header + payload) to advance by, or
 * 0 on a malformed option / unknown mandatory option, with
 * result_code set accordingly. */
static int parsePCPOption(uint8_t* pcp_buf, int remain, pcp_info_t *pcp_msg_info)
{
#ifdef DEBUG
char third_addr[INET6_ADDRSTRLEN];
#endif /* DEBUG */
unsigned short option_length;
/* Do centralized option sanity checks here. */
if (remain < (int)PCP_OPTION_HDR_SIZE) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_OPTION;
return 0;
}
option_length = READNU16(pcp_buf + 2) + 4; /* len */
if (remain < option_length) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_OPTION;
return 0;
}
switch (pcp_buf[0]) { /* code */
case PCP_OPTION_3RD_PARTY:
if (option_length != PCP_3RD_PARTY_OPTION_SIZE) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_OPTION;
return 0;
}
#ifdef DEBUG
syslog(LOG_DEBUG, "PCP OPTION: \t Third party\n");
syslog(LOG_DEBUG, "Third PARTY IP: \t %s\n", inet_ntop(AF_INET6,
pcp_buf + 4, third_addr, INET6_ADDRSTRLEN));
#endif
/* only one THIRD_PARTY option is allowed per request */
if (pcp_msg_info->thirdp_ip ) {
syslog(LOG_ERR, "PCP: THIRD PARTY OPTION was already present. \n");
pcp_msg_info->result_code = PCP_ERR_MALFORMED_OPTION;
return 0;
} else {
/* the mapping now targets the third party's address */
pcp_msg_info->thirdp_ip = (struct in6_addr *)(pcp_buf + 4);
pcp_msg_info->mapped_ip = (struct in6_addr *)(pcp_buf + 4);
}
break;
case PCP_OPTION_PREF_FAIL:
if (option_length != PCP_PREFER_FAIL_OPTION_SIZE) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_OPTION;
return 0;
}
#ifdef DEBUG
syslog(LOG_DEBUG, "PCP OPTION: \t Prefer failure \n");
#endif
if (pcp_msg_info->opcode != PCP_OPCODE_MAP) {
syslog(LOG_DEBUG, "PCP: Unsupported OPTION for given OPCODE.\n");
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
}
if (pcp_msg_info->pfailure_present != 0 ) {
syslog(LOG_DEBUG, "PCP: PREFER FAILURE OPTION was already present.\n");
pcp_msg_info->result_code = PCP_ERR_MALFORMED_OPTION;
} else {
pcp_msg_info->pfailure_present = 1;
}
break;
case PCP_OPTION_FILTER:
/* TODO fully implement filter */
if (option_length != PCP_FILTER_OPTION_SIZE) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_OPTION;
return 0;
}
#ifdef DEBUG
syslog(LOG_DEBUG, "PCP OPTION: \t Filter\n");
#endif
if (pcp_msg_info->opcode != PCP_OPCODE_MAP) {
syslog(LOG_ERR, "PCP: Unsupported OPTION for given OPCODE.\n");
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return 0;
}
break;
#ifdef PCP_FLOWP
case PCP_OPTION_FLOW_PRIORITY:
#ifdef DEBUG
syslog(LOG_DEBUG, "PCP OPTION: \t Flow priority\n");
#endif
if (option_length != PCP_FLOW_PRIORITY_OPTION_SIZE) {
syslog(LOG_ERR, "PCP: Error processing DSCP. sizeof %d and remaining %d. flow len %d \n",
PCP_FLOW_PRIORITY_OPTION_SIZE, remain, READNU16(pcp_buf + 2));
pcp_msg_info->result_code = PCP_ERR_MALFORMED_OPTION;
return 0;
}
#ifdef DEBUG
syslog(LOG_DEBUG, "DSCP UP: \t %d\n", pcp_buf[4]);
syslog(LOG_DEBUG, "DSCP DOWN: \t %d\n", pcp_buf[5]);
#endif
pcp_msg_info->dscp_up = pcp_buf[4];
pcp_msg_info->dscp_down = pcp_buf[5];
pcp_msg_info->flowp_present = 1;
break;
#endif
default:
/* option codes below 128 are mandatory-to-process per RFC 6887 */
if (pcp_buf[0] < 128) {
syslog(LOG_ERR, "PCP: Unrecognized mandatory PCP OPTION: %d \n", (int)pcp_buf[0]);
/* Mandatory to understand */
pcp_msg_info->result_code = PCP_ERR_UNSUPP_OPTION;
remain = 0;
break;
}
/* TODO - log optional not understood options? */
break;
}
return option_length;
}
/* Walk all PCP options appended to the opcode payload, handing each to
 * parsePCPOption(), which validates it, records it in pcp_msg_info and
 * returns its total size (0 on a malformed or unknown mandatory
 * option, which stops the walk and leaves result_code set).
 * Fix: iterate with a uint8_t* cursor -- arithmetic on void* is a GCC
 * extension, not ISO C. */
static void parsePCPOptions(void* pcp_buf, int remain, pcp_info_t *pcp_msg_info)
{
	uint8_t *option = (uint8_t *)pcp_buf;
	int option_length;

	while (remain > 0) {
		option_length = parsePCPOption(option, remain, pcp_msg_info);
		if (!option_length)
			break;
		remain -= option_length;
		option += option_length;
	}
	if (remain > 0) {
		syslog(LOG_WARNING, "%s: remain=%d", "parsePCPOptions", remain);
	}
}
/* CheckExternalAddress()
* Check that suggested external address in request match a real external
* IP address.
* Suggested address can also be 0 IPv4 or IPv6 address.
* (see http://tools.ietf.org/html/rfc6887#section-10 )
* return values :
* 0 : check is OK
* -1 : check failed */
/* CheckExternalAddress()
 * Check that suggested external address in request match a real external
 * IP address.
 * Suggested address can also be 0 IPv4 or IPv6 address.
 * (see http://tools.ietf.org/html/rfc6887#section-10 )
 * Also decides FW vs NAT mode: a native IPv6 client is firewalled, a
 * v4-mapped client is NATted.
 * On success pcp_msg_info->ext_ip ends up pointing at the address to
 * use; note it may point at the function-local 'static' below, so the
 * pointer stays valid after return (but the function is not
 * reentrant/thread-safe).
 * return values :
 * 0 : check is OK
 * -1 : check failed (result_code set) */
static int CheckExternalAddress(pcp_info_t* pcp_msg_info)
{
/* can contain a IPv4-mapped IPv6 address */
static struct in6_addr external_addr;
int af;
af = IN6_IS_ADDR_V4MAPPED(pcp_msg_info->mapped_ip)
? AF_INET : AF_INET6;
pcp_msg_info->is_fw = af == AF_INET6;
if (pcp_msg_info->is_fw) {
/* firewall mode: the client's own address is the "external" one */
external_addr = *pcp_msg_info->mapped_ip;
} else {
/* TODO : be able to handle case with multiple
 * external addresses */
if(use_ext_ip_addr) {
/* configured external address: accept v4 (stored v4-mapped) or v6 */
if (inet_pton(AF_INET, use_ext_ip_addr,
((uint32_t*)external_addr.s6_addr)+3) == 1) {
((uint32_t*)external_addr.s6_addr)[0] = 0;
((uint32_t*)external_addr.s6_addr)[1] = 0;
((uint32_t*)external_addr.s6_addr)[2] = htonl(0xFFFF);
} else if (inet_pton(AF_INET6, use_ext_ip_addr, external_addr.s6_addr)
!= 1) {
pcp_msg_info->result_code = PCP_ERR_NETWORK_FAILURE;
return -1;
}
} else {
/* no configured address: look it up on the external interface */
if(!ext_if_name || ext_if_name[0]=='\0') {
pcp_msg_info->result_code = PCP_ERR_NETWORK_FAILURE;
return -1;
}
if(getifaddr_in6(ext_if_name, af, &external_addr) < 0) {
pcp_msg_info->result_code = PCP_ERR_NETWORK_FAILURE;
return -1;
}
}
}
if (pcp_msg_info->ext_ip == NULL ||
IN6_IS_ADDR_UNSPECIFIED(pcp_msg_info->ext_ip) ||
(IN6_IS_ADDR_V4MAPPED(pcp_msg_info->ext_ip)
&& ((uint32_t *)pcp_msg_info->ext_ip->s6_addr)[3] == INADDR_ANY)) {
/* no suggested external address : use real external address */
pcp_msg_info->ext_ip = &external_addr;
return 0;
}
if (!IN6_ARE_ADDR_EQUAL(pcp_msg_info->ext_ip, &external_addr)) {
syslog(LOG_ERR,
"PCP: External IP in request didn't match interface IP \n");
#ifdef DEBUG
{
char s[INET6_ADDRSTRLEN];
syslog(LOG_DEBUG, "Interface IP %s \n",
inet_ntop(AF_INET6, &external_addr.s6_addr, s, sizeof(s)));
syslog(LOG_DEBUG, "IP in the PCP request %s \n",
inet_ntop(AF_INET6, pcp_msg_info->ext_ip, s, sizeof(s)));
}
#endif
/* with PREFER_FAILURE the mismatch is fatal; otherwise fall back
 * to the real external address */
if (pcp_msg_info->pfailure_present) {
pcp_msg_info->result_code = PCP_ERR_CANNOT_PROVIDE_EXTERNAL;
return -1;
} else {
pcp_msg_info->ext_ip = &external_addr;
}
}
return 0;
}
/* Render an IPv6 address as text, unwrapping v4-mapped addresses
 * (::ffff:a.b.c.d) to plain dotted-quad form.
 * Returns 'buf' on success or NULL on error (as inet_ntop does). */
static const char* inet_n46top(const struct in6_addr* in,
		char* buf, size_t buf_len)
{
	if (!IN6_IS_ADDR_V4MAPPED(in))
		return inet_ntop(AF_INET6, in, buf, buf_len);
	/* the embedded IPv4 address occupies the last 4 bytes */
	return inet_ntop(AF_INET, ((uint32_t*)(in->s6_addr))+3, buf, buf_len);
}
#ifdef PCP_PEER
/* Fill a sockaddr from an in6_addr and a host-order port: a v4-mapped
 * address produces a sockaddr_in (IPv4 unwrapped from the last 4
 * bytes), anything else a sockaddr_in6. 'sa' must be large enough for
 * the chosen family (e.g. a sockaddr_storage). */
static void FillSA(struct sockaddr *sa, const struct in6_addr *in6,
		uint16_t port)
{
	if (IN6_IS_ADDR_V4MAPPED(in6)) {
		struct sockaddr_in *sa4 = (struct sockaddr_in *)sa;
		sa4->sin_family = AF_INET;
		sa4->sin_port = htons(port);
		sa4->sin_addr.s_addr = ((const uint32_t*)in6->s6_addr)[3];
	} else {
		struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)sa;
		sa6->sin6_family = AF_INET6;
		sa6->sin6_port = htons(port);
		sa6->sin6_addr = *in6;
	}
}
/* Render a sockaddr's address as text. AF_INET is printed as IPv4;
 * any other family is treated as sockaddr_in6 (matching the original
 * behavior). Returns 'buf' or NULL on error. */
static const char* inet_satop(struct sockaddr* sa, char* buf, size_t buf_len)
{
	if (sa->sa_family == AF_INET)
		return inet_ntop(AF_INET, &(((struct sockaddr_in*)sa)->sin_addr), buf, buf_len);
	return inet_ntop(AF_INET6, &(((struct sockaddr_in6*)sa)->sin6_addr), buf, buf_len);
}
/* Create a NAT PEER mapping: reuses the external port of an existing
 * NAT session toward the same peer when one exists, installs optional
 * DSCP (flow priority) rules in both directions, then adds the peer
 * redirect rule. On success the chosen external port is written back
 * to pcp_msg_info->ext_port.
 * Returns PCP_SUCCESS or a PCP error code. */
static int CreatePCPPeer_NAT(pcp_info_t *pcp_msg_info)
{
struct sockaddr_storage intip;
struct sockaddr_storage peerip;
struct sockaddr_storage extip;
struct sockaddr_storage ret_extip;
uint8_t proto = pcp_msg_info->protocol;
uint16_t eport = pcp_msg_info->ext_port; /* public port */
char peerip_s[INET6_ADDRSTRLEN], extip_s[INET6_ADDRSTRLEN];
time_t timestamp = upnp_time() + pcp_msg_info->lifetime;
int r;
FillSA((struct sockaddr*)&intip, pcp_msg_info->mapped_ip,
pcp_msg_info->int_port);
FillSA((struct sockaddr*)&peerip, pcp_msg_info->peer_ip,
pcp_msg_info->peer_port);
FillSA((struct sockaddr*)&extip, pcp_msg_info->ext_ip,
eport);
inet_satop((struct sockaddr*)&peerip, peerip_s, sizeof(peerip_s));
inet_satop((struct sockaddr*)&extip, extip_s, sizeof(extip_s));
/* check if connection with given peer exists, if it was */
/* already established use this external port */
if (get_nat_ext_addr( (struct sockaddr*)&intip, (struct sockaddr*)&peerip,
proto, (struct sockaddr*)&ret_extip) == 1) {
if (ret_extip.ss_family == AF_INET) {
struct sockaddr_in* ret_ext4 = (struct sockaddr_in*)&ret_extip;
uint16_t ret_eport = ntohs(ret_ext4->sin_port);
eport = ret_eport;
} else if (ret_extip.ss_family == AF_INET6) {
struct sockaddr_in6* ret_ext6 = (struct sockaddr_in6*)&ret_extip;
uint16_t ret_eport = ntohs(ret_ext6->sin6_port);
eport = ret_eport;
} else {
return PCP_ERR_CANNOT_PROVIDE_EXTERNAL;
}
}
/* Create Peer Mapping */
if (eport == 0) {
/* no port suggested and none found: mirror the internal port */
eport = pcp_msg_info->int_port;
}
#ifdef PCP_FLOWP
if (pcp_msg_info->flowp_present && pcp_msg_info->dscp_up) {
if (add_peer_dscp_rule2(ext_if_name, peerip_s,
pcp_msg_info->peer_port, pcp_msg_info->dscp_up,
pcp_msg_info->mapped_str, pcp_msg_info->int_port,
proto, pcp_msg_info->desc, timestamp) < 0 ) {
syslog(LOG_ERR, "PCP: failed to add flowp upstream mapping %s:%hu->%s:%hu '%s'",
pcp_msg_info->mapped_str,
pcp_msg_info->int_port,
peerip_s,
pcp_msg_info->peer_port,
pcp_msg_info->desc);
return PCP_ERR_NO_RESOURCES;
}
}
if (pcp_msg_info->flowp_present && pcp_msg_info->dscp_down) {
if (add_peer_dscp_rule2(ext_if_name, pcp_msg_info->mapped_str,
pcp_msg_info->int_port, pcp_msg_info->dscp_down,
peerip_s, pcp_msg_info->peer_port, proto, pcp_msg_info->desc, timestamp)
< 0 ) {
syslog(LOG_ERR, "PCP: failed to add flowp downstream mapping %s:%hu->%s:%hu '%s'",
pcp_msg_info->mapped_str,
pcp_msg_info->int_port,
peerip_s,
pcp_msg_info->peer_port,
pcp_msg_info->desc);
pcp_msg_info->result_code = PCP_ERR_NO_RESOURCES;
return PCP_ERR_NO_RESOURCES;
}
}
#endif
r = add_peer_redirect_rule2(ext_if_name,
peerip_s,
pcp_msg_info->peer_port,
extip_s,
eport,
pcp_msg_info->mapped_str,
pcp_msg_info->int_port,
pcp_msg_info->protocol,
pcp_msg_info->desc,
timestamp);
if (r < 0)
return PCP_ERR_NO_RESOURCES;
pcp_msg_info->ext_port = eport;
return PCP_SUCCESS;
}
/* Handle a PCP PEER create/renew request: render the peer address as
 * text, delegate to the NAT backend (outbound pinholes for pure
 * firewall mode are not implemented yet), store the result code in
 * pcp_msg_info->result_code and log the outcome. */
static void CreatePCPPeer(pcp_info_t *pcp_msg_info)
{
char peerip_s[INET6_ADDRSTRLEN];
int r = -1;
if (!inet_n46top(pcp_msg_info->peer_ip, peerip_s, sizeof(peerip_s))) {
/* NOTE(review): returns without setting result_code on this
 * conversion failure -- confirm callers handle that. */
syslog(LOG_ERR, "inet_n46top(peer_ip): %m");
return;
}
if (pcp_msg_info->is_fw) {
#if 0
/* Someday, something like this is available.. and we're ready! */
#ifdef ENABLE_UPNPPINHOLE
pcp_msg_info->ext_port = pcp_msg_info->int_port;
r = upnp_add_outbound_pinhole(peerip_s,
pcp_msg_info->peer_port,
pcp_msg_info->mapped_str,
pcp_msg_info->int_port,
pcp_msg_info->protocol,
pcp_msg_info->desc,
pcp_msg_info->lifetime, NULL);
#endif /* ENABLE_UPNPPINHOLE */
#else
/* no outbound pinhole support: PEER is unsupported in firewall mode */
r = PCP_ERR_UNSUPP_OPCODE;
#endif /* 0 */
} else {
r = CreatePCPPeer_NAT(pcp_msg_info);
}
/* TODO: add upnp function for PI */
pcp_msg_info->result_code = r;
syslog(r == PCP_SUCCESS ? LOG_INFO : LOG_ERR,
"PCP PEER: %s peer mapping %s %s:%hu(%hu)->%s:%hu '%s'",
r == PCP_SUCCESS ? "added" : "failed to add",
(pcp_msg_info->protocol==IPPROTO_TCP)?"TCP":"UDP",
pcp_msg_info->mapped_str,
pcp_msg_info->int_port,
pcp_msg_info->ext_port,
peerip_s,
pcp_msg_info->peer_port,
pcp_msg_info->desc);
}
/* Delete a previously created PEER mapping (lifetime == 0 request).
 * A rule is removed only when internal address/port, remote host/port,
 * protocol AND the description string (which encodes the PCP nonce)
 * all match the request. Firewall (pinhole) mode is not supported. */
static void DeletePCPPeer(pcp_info_t *pcp_msg_info)
{
uint16_t iport = pcp_msg_info->int_port; /* private port */
uint16_t rport = pcp_msg_info->peer_port; /* private port */
uint8_t proto = pcp_msg_info->protocol;
char rhost[INET6_ADDRSTRLEN];
int r = -1;
/* remove requested mappings for this client */
int index = 0;
unsigned short eport2, iport2, rport2;
char iaddr2[INET6_ADDRSTRLEN], rhost2[INET6_ADDRSTRLEN];
int proto2;
char desc[64];
unsigned int timestamp;
#if 0
int uid;
#endif /* 0 */
if (pcp_msg_info->is_fw) {
pcp_msg_info->result_code = PCP_ERR_UNSUPP_OPCODE;
return;
}
inet_n46top((struct in6_addr*)pcp_msg_info->peer_ip, rhost, sizeof(rhost));
/* walk every peer rule; note the loop body is the single if() below */
for (index = 0 ;
(!pcp_msg_info->is_fw &&
get_peer_rule_by_index(index, 0,
&eport2, iaddr2, sizeof(iaddr2),
&iport2, &proto2,
desc, sizeof(desc),
rhost2, sizeof(rhost2), &rport2,
&timestamp, 0, 0) >= 0)
#if 0
/* Some day if outbound pinholes are supported.. */
||
(pcp_msg_info->is_fw &&
(uid=upnp_get_pinhole_uid_by_index(index))>=0 &&
upnp_get_pinhole_info((unsigned short)uid,
rhost2, sizeof(rhost2), &rport2,
iaddr2, sizeof(iaddr2), &iport2,
&proto2, desc, sizeof(desc),
&timestamp, NULL) >= 0)
#endif /* 0 */
;
index++)
if((0 == strcmp(iaddr2, pcp_msg_info->mapped_str))
&& (0 == strcmp(rhost2, rhost))
&& (proto2==proto)
&& 0 == strcmp(desc, pcp_msg_info->desc)
&& (iport2==iport) && (rport2==rport)) {
if (!pcp_msg_info->is_fw)
r = _upnp_delete_redir(eport2, proto2);
#if 0
else
r = upnp_delete_outboundpinhole(uid);
#endif /* 0 */
/* NOTE(review): when the deletion itself fails (r < 0) this
 * returns without setting result_code -- confirm intended. */
if(r<0) {
syslog(LOG_ERR, "PCP PEER: failed to remove peer mapping");
} else {
syslog(LOG_INFO, "PCP PEER: %s port %hu peer mapping removed",
proto2==IPPROTO_TCP?"TCP":"UDP", eport2);
}
return;
}
/* r stayed -1: no rule matched the request */
if (r==-1) {
syslog(LOG_ERR, "PCP PEER: Failed to find PCP mapping internal port %hu, protocol %s",
iport, (pcp_msg_info->protocol == IPPROTO_TCP)?"TCP":"UDP");
pcp_msg_info->result_code = PCP_ERR_NO_RESOURCES;
}
}
#endif /* PCP_PEER */
/* Install a NAT redirect rule for a MAP request.
 * Searches for a usable external port starting at the suggested one
 * (falling back to the internal port when none was suggested),
 * skipping ports rejected by the permission rules, already in use,
 * or already redirected to another host. When the PREFER_FAILURE
 * option is present, any conflict aborts immediately instead of
 * trying the next port. Returns PCP_SUCCESS or a PCP_ERR_* code. */
static int CreatePCPMap_NAT(pcp_info_t *pcp_msg_info)
{
int r = 0;
char iaddr_old[INET6_ADDRSTRLEN];
uint16_t iport_old, eport_first = 0;
int any_eport_allowed = 0;
/* absolute expiry time for the new rule */
unsigned int timestamp = upnp_time() + pcp_msg_info->lifetime;
if (pcp_msg_info->ext_port == 0) {
pcp_msg_info->ext_port = pcp_msg_info->int_port;
}
/* TODO: Support non-TCP/UDP */
if (pcp_msg_info->ext_port == 0) {
return PCP_ERR_MALFORMED_REQUEST;
}
do {
if (eport_first == 0) { /* first time in loop */
eport_first = pcp_msg_info->ext_port;
} else if (pcp_msg_info->ext_port == eport_first) { /* no eport available */
/* wrapped all the way around the 16-bit port space */
/* all eports rejected by permissions? */
if (any_eport_allowed == 0)
return PCP_ERR_NOT_AUTHORIZED;
/* at least one eport allowed (but none available) */
return PCP_ERR_NO_RESOURCES;
}
/* permission check applies only to IPv4-mapped internal addresses */
if ((IN6_IS_ADDR_V4MAPPED(pcp_msg_info->mapped_ip) &&
(!check_upnp_rule_against_permissions(upnppermlist,
num_upnpperm, pcp_msg_info->ext_port,
((struct in_addr*)pcp_msg_info->mapped_ip->s6_addr)[3],
pcp_msg_info->int_port)))) {
if (pcp_msg_info->pfailure_present) {
return PCP_ERR_CANNOT_PROVIDE_EXTERNAL;
}
pcp_msg_info->ext_port++;
if (pcp_msg_info->ext_port == 0) { /* skip port zero */
pcp_msg_info->ext_port++;
}
continue;
}
any_eport_allowed = 1;
#ifdef CHECK_PORTINUSE
if (port_in_use(ext_if_name, pcp_msg_info->ext_port, pcp_msg_info->protocol,
pcp_msg_info->mapped_str, pcp_msg_info->int_port) > 0) {
syslog(LOG_INFO, "port %hu protocol %s already in use",
pcp_msg_info->ext_port,
(pcp_msg_info->protocol==IPPROTO_TCP)?"tcp":"udp");
pcp_msg_info->ext_port++;
if (pcp_msg_info->ext_port == 0) { /* skip port zero */
pcp_msg_info->ext_port++;
}
continue;
}
#endif
/* r == 0 means a redirect already exists on this external port */
r = get_redirect_rule(ext_if_name,
pcp_msg_info->ext_port,
pcp_msg_info->protocol,
iaddr_old, sizeof(iaddr_old),
&iport_old, 0, 0, 0, 0,
NULL/*&timestamp*/, 0, 0);
if(r==0) {
if((strcmp(pcp_msg_info->mapped_str, iaddr_old)!=0)
|| (pcp_msg_info->int_port != iport_old)) {
/* redirection already existing */
if (pcp_msg_info->pfailure_present) {
return PCP_ERR_CANNOT_PROVIDE_EXTERNAL;
}
} else {
/* the existing rule belongs to this same client */
syslog(LOG_INFO, "port %hu %s already redirected to %s:%hu, replacing",
pcp_msg_info->ext_port, (pcp_msg_info->protocol==IPPROTO_TCP)?"tcp":"udp",
iaddr_old, iport_old);
/* remove and then add again */
if (_upnp_delete_redir(pcp_msg_info->ext_port,
pcp_msg_info->protocol)==0) {
break;
} else if (pcp_msg_info->pfailure_present) {
return PCP_ERR_CANNOT_PROVIDE_EXTERNAL;
}
}
pcp_msg_info->ext_port++;
if (pcp_msg_info->ext_port == 0) { /* skip port zero */
pcp_msg_info->ext_port++;
}
}
} while (r==0);
/* a free external port was found (or freed): install the redirect */
r = upnp_redirect_internal(NULL,
pcp_msg_info->ext_port,
pcp_msg_info->mapped_str,
pcp_msg_info->int_port,
pcp_msg_info->protocol,
pcp_msg_info->desc,
timestamp);
if (r < 0)
return PCP_ERR_NO_RESOURCES;
return PCP_SUCCESS;
}
/* Handle a MAP request in pure firewall mode: open (or refresh) an
 * inbound IPv6 pinhole instead of a NAT redirect.
 * Returns PCP_SUCCESS or PCP_ERR_NO_RESOURCES. */
static int CreatePCPMap_FW(pcp_info_t *pcp_msg_info)
{
#ifdef ENABLE_UPNPPINHOLE
int uid;
int r;
/* first check if pinhole already exists */
uid = upnp_find_inboundpinhole(NULL, 0,
pcp_msg_info->mapped_str,
pcp_msg_info->int_port,
pcp_msg_info->protocol,
NULL, 0, /* desc */
NULL /* lifetime */);
if(uid >= 0) {
/* pinhole already exists, updating */
/* NOTE(review): unlike the add path below, ext_port is not set
 * here before returning -- confirm the response is still
 * populated correctly on the update path. */
syslog(LOG_INFO, "updating pinhole to %s:%hu %s",
pcp_msg_info->mapped_str, pcp_msg_info->int_port,
(pcp_msg_info->protocol == IPPROTO_TCP)?"TCP":"UDP");
r = upnp_update_inboundpinhole((unsigned short)uid, pcp_msg_info->lifetime);
return r >= 0 ? PCP_SUCCESS : PCP_ERR_NO_RESOURCES;
} else {
r = upnp_add_inboundpinhole(NULL, 0,
pcp_msg_info->mapped_str,
pcp_msg_info->int_port,
pcp_msg_info->protocol,
pcp_msg_info->desc,
pcp_msg_info->lifetime,
&uid);
if (r < 0)
return PCP_ERR_NO_RESOURCES;
/* no NAT involved: external port is the internal port */
pcp_msg_info->ext_port = pcp_msg_info->int_port;
return PCP_SUCCESS;
}
#else
UNUSED(pcp_msg_info);
return PCP_ERR_NO_RESOURCES;
#endif /* ENABLE_UPNPPINHOLE */
}
/* internal external PCP remote peer actual remote peer
* -------- ------- --------------- ------------------
* IPv4 firewall IPv4 IPv4 IPv4 IPv4
* IPv6 firewall IPv6 IPv6 IPv6 IPv6
* NAT44 IPv4 IPv4 IPv4 IPv4
* NAT46 IPv4 IPv6 IPv4 IPv6
* NAT64 IPv6 IPv4 IPv6 IPv4
* NPTv6 IPv6 IPv6 IPv6 IPv6
*
* Address Families with MAP and PEER
*
* The 'internal' address is implicitly the same as the source IP
* address of the PCP request, except when the THIRD_PARTY option is
* used.
*
* The 'external' address is the Suggested External Address field of the
* MAP or PEER request, and its address family is usually the same as
* the 'internal' address family, except when technologies like NAT64
* are used.
*
* The 'remote peer' address is the remote peer IP address of the PEER
* request or the FILTER option of the MAP request, and is always the
* same address family as the 'internal' address, even when NAT64 is
* used. In NAT64, the IPv6 PCP client is not necessarily aware of the
* NAT64 or aware of the actual IPv4 address of the remote peer, so it
* expresses the IPv6 address from its perspective. */
/* TODO: Support more than basic NAT44 / IPv6 firewall cases. */
/* Dispatch a MAP create/renew request to the firewall or NAT backend,
 * record the resulting PCP code in pcp_msg_info->result_code and log
 * the outcome. */
static void CreatePCPMap(pcp_info_t *pcp_msg_info)
{
	int res;

	res = pcp_msg_info->is_fw ? CreatePCPMap_FW(pcp_msg_info)
	                          : CreatePCPMap_NAT(pcp_msg_info);
	pcp_msg_info->result_code = res;
	syslog(res == PCP_SUCCESS ? LOG_INFO : LOG_ERR,
	       "PCP MAP: %s mapping %s %hu->%s:%hu '%s'",
	       res == PCP_SUCCESS ? "added" : "failed to add",
	       (pcp_msg_info->protocol==IPPROTO_TCP)?"TCP":"UDP",
	       pcp_msg_info->ext_port,
	       pcp_msg_info->mapped_str,
	       pcp_msg_info->int_port,
	       pcp_msg_info->desc);
}
/* Delete a MAP rule (lifetime == 0 request). Matches on internal
 * address + protocol + internal port (any port when iport == 0); the
 * stored description must carry the same PCP nonce, otherwise the
 * request is rejected as NOT_AUTHORIZED. */
static void DeletePCPMap(pcp_info_t *pcp_msg_info)
{
uint16_t iport = pcp_msg_info->int_port; /* private port */
uint8_t proto = pcp_msg_info->protocol;
int r=-1;
/* remove the mapping */
/* remove all the mappings for this client */
int index;
unsigned short eport2, iport2;
char iaddr2[INET6_ADDRSTRLEN];
int proto2;
char desc[64];
unsigned int timestamp;
#ifdef ENABLE_UPNPPINHOLE
int uid = -1;
#endif /* ENABLE_UPNPPINHOLE */
/* iterate through all rules and delete the requested ones */
/* iteration source is the NAT redirect table or the pinhole table,
 * depending on is_fw; the loop body is the single if() below */
for (index = 0 ;
(!pcp_msg_info->is_fw &&
get_redirect_rule_by_index(index, 0,
&eport2, iaddr2, sizeof(iaddr2),
&iport2, &proto2,
desc, sizeof(desc),
0, 0, &timestamp, 0, 0) >= 0)
#ifdef ENABLE_UPNPPINHOLE
||
(pcp_msg_info->is_fw &&
(uid=upnp_get_pinhole_uid_by_index(index))>=0 &&
upnp_get_pinhole_info((unsigned short)uid,
NULL, 0, NULL,
iaddr2, sizeof(iaddr2), &iport2,
&proto2, desc, sizeof(desc),
&timestamp, NULL) >= 0)
#endif /* ENABLE_UPNPPINHOLE */
;
index++)
if(0 == strcmp(iaddr2, pcp_msg_info->mapped_str)
&& (proto2==proto)
&& ((iport2==iport) || (iport==0))) {
if(0 != strcmp(desc, pcp_msg_info->desc)) {
/* nonce does not match */
pcp_msg_info->result_code = PCP_ERR_NOT_AUTHORIZED;
syslog(LOG_ERR, "Unauthorized to remove PCP mapping internal port %hu, protocol %s",
iport, (pcp_msg_info->protocol == IPPROTO_TCP)?"TCP":"UDP");
return;
} else if (!pcp_msg_info->is_fw) {
r = _upnp_delete_redir(eport2, proto2);
} else {
#ifdef ENABLE_UPNPPINHOLE
r = upnp_delete_inboundpinhole(uid);
#endif /* ENABLE_UPNPPINHOLE */
}
break;
}
/* NOTE(review): in the pinhole (is_fw) path eport2 is never filled in
 * by upnp_get_pinhole_info(), so the success log below may print an
 * indeterminate value -- confirm. */
if (r >= 0) {
syslog(LOG_INFO, "PCP: %s port %hu mapping removed",
proto2==IPPROTO_TCP?"TCP":"UDP", eport2);
} else {
syslog(LOG_ERR, "Failed to remove PCP mapping internal port %hu, protocol %s",
iport, (pcp_msg_info->protocol == IPPROTO_TCP)?"TCP":"UDP");
pcp_msg_info->result_code = PCP_ERR_NO_RESOURCES;
}
}
/* Common validation applied to parsed MAP/PEER requests.
 * Returns 1 when the request may be acted upon; 0 otherwise, with
 * pcp_msg_info->result_code set to the PCP error (except the
 * inet_n46top() failure path, which only logs).
 * On success also fills in mapped_str and the unique 'desc' string
 * (embedding the PCP nonce) used to tag and later find the rule. */
static int ValidatePCPMsg(pcp_info_t *pcp_msg_info)
{
/* an earlier parsing stage already flagged an error */
if (pcp_msg_info->result_code) {
return 0;
}
/* RFC 6887, section 8.2: MUST return address mismatch if NAT
* in middle. */
if (memcmp(pcp_msg_info->int_ip,
&pcp_msg_info->sender_ip,
sizeof(pcp_msg_info->sender_ip)) != 0) {
pcp_msg_info->result_code = PCP_ERR_ADDRESS_MISMATCH;
return 0;
}
if (pcp_msg_info->thirdp_ip) {
if (!GETFLAG(PCP_ALLOWTHIRDPARTYMASK)) {
pcp_msg_info->result_code = PCP_ERR_UNSUPP_OPTION;
return 0;
}
/* RFC 6887, section 13.1 - if sender ip == THIRD_PARTY,
* it's an error. */
if (memcmp(pcp_msg_info->thirdp_ip,
&pcp_msg_info->sender_ip,
sizeof(pcp_msg_info->sender_ip)) == 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return 0;
}
}
/* Produce mapped_str for future use. */
/* NOTE(review): this failure path returns 0 without setting
 * result_code -- confirm the caller's handling is intended. */
if (!inet_n46top(pcp_msg_info->mapped_ip, pcp_msg_info->mapped_str,
sizeof(pcp_msg_info->mapped_str))) {
syslog(LOG_ERR, "inet_ntop(pcpserver): %m");
return 0;
}
/* protocol zero means 'all protocols' : internal port MUST be zero */
if (pcp_msg_info->protocol == 0 && pcp_msg_info->int_port != 0) {
syslog(LOG_ERR, "PCP %s: Protocol was ZERO, but internal port "
"has non-ZERO value.", getPCPOpCodeStr(pcp_msg_info->opcode));
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return 0;
}
/* PREFER_FAILURE requires an actual suggested external addr/port */
if (pcp_msg_info->pfailure_present) {
if ( (IN6_IS_ADDR_UNSPECIFIED(pcp_msg_info->ext_ip) ||
((IN6_IS_ADDR_V4MAPPED(pcp_msg_info->ext_ip)) &&
(((uint32_t*)pcp_msg_info->ext_ip->s6_addr)[3] == 0))) &&
(pcp_msg_info->ext_port == 0)
)
{
pcp_msg_info->result_code = PCP_ERR_MALFORMED_OPTION;
return 0;
}
}
if (CheckExternalAddress(pcp_msg_info)) {
return 0;
}
/* Fill in the desc that describes uniquely what flow we're
* dealing with (same code used in both create + delete of
* MAP/PEER) */
switch (pcp_msg_info->opcode) {
case PCP_OPCODE_MAP:
case PCP_OPCODE_PEER:
snprintf(pcp_msg_info->desc, sizeof(pcp_msg_info->desc),
"PCP %s %08x%08x%08x",
getPCPOpCodeStr(pcp_msg_info->opcode),
pcp_msg_info->nonce[0],
pcp_msg_info->nonce[1], pcp_msg_info->nonce[2]);
break;
}
return 1;
}
/*
 * Parse one PCP request (req/req_size) and carry out the requested
 * operation, recording the outcome in pcp_msg_info->result_code.
 * Return value indicates whether the request is valid or not:
 * 0 means silently ignore the message; non-zero means a response
 * should be formed and sent back.
 */
static int processPCPRequest(void * req, int req_size, pcp_info_t *pcp_msg_info)
{
int remainingSize;
/* start with PCP_SUCCESS as result code,
* if everything is OK value will be unchanged */
pcp_msg_info->result_code = PCP_SUCCESS;
remainingSize = req_size;
/* discard request that exceeds maximal length,
or that is shorter than PCP_MIN_LEN (=24)
or that is not the multiple of 4 */
if (req_size < 3)
return 0; /* ignore msg */
if (req_size < PCP_MIN_LEN) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return 1; /* send response */
}
if ( (req_size > PCP_MAX_LEN) || ( (req_size & 3) != 0)) {
syslog(LOG_ERR, "PCP: Size of PCP packet(%d) is larger than %d bytes or "
"the size is not multiple of 4.\n", req_size, PCP_MAX_LEN);
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return 1; /* send response */
}
/* first parse request header */
if (parseCommonRequestHeader(req, pcp_msg_info) ) {
return 1;
}
/* NOTE(review): arithmetic on a void* below is a GCC extension
 * (treated as char*); fine for this codebase's toolchain. */
remainingSize -= PCP_COMMON_REQUEST_SIZE;
req += PCP_COMMON_REQUEST_SIZE;
if (pcp_msg_info->version == 1) {
/* legacy PCP version 1 support */
switch (pcp_msg_info->opcode) {
case PCP_OPCODE_MAP:
/* make sure the opcode-specific part fits before reading it */
remainingSize -= PCP_MAP_V1_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printMAPOpcodeVersion1(req);
#endif /* DEBUG */
parsePCPMAP_version1(req, pcp_msg_info);
req += PCP_MAP_V1_SIZE;
parsePCPOptions(req, remainingSize, pcp_msg_info);
/* lifetime == 0 is a delete request, otherwise create/renew */
if (ValidatePCPMsg(pcp_msg_info)) {
if (pcp_msg_info->lifetime == 0) {
DeletePCPMap(pcp_msg_info);
} else {
CreatePCPMap(pcp_msg_info);
}
} else {
syslog(LOG_ERR, "PCP: Invalid PCP v1 MAP message.");
return pcp_msg_info->result_code;
}
break;
#ifdef PCP_PEER
case PCP_OPCODE_PEER:
remainingSize -= PCP_PEER_V1_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printPEEROpcodeVersion1(req);
#endif /* DEBUG */
parsePCPPEER_version1(req, pcp_msg_info);
req += PCP_PEER_V1_SIZE;
parsePCPOptions(req, remainingSize, pcp_msg_info);
if (ValidatePCPMsg(pcp_msg_info)) {
if (pcp_msg_info->lifetime == 0) {
DeletePCPPeer(pcp_msg_info);
} else {
CreatePCPPeer(pcp_msg_info);
}
} else {
syslog(LOG_ERR, "PCP: Invalid PCP v1 PEER message.");
return pcp_msg_info->result_code;
}
break;
#endif /* PCP_PEER */
default:
pcp_msg_info->result_code = PCP_ERR_UNSUPP_OPCODE;
break;
}
} else if (pcp_msg_info->version == 2) {
/* RFC 6887 PCP support
* http://tools.ietf.org/html/rfc6887 */
switch (pcp_msg_info->opcode) {
case PCP_OPCODE_ANNOUNCE:
/* should check PCP Client's IP Address in request */
/* see http://tools.ietf.org/html/rfc6887#section-14.1 */
break;
case PCP_OPCODE_MAP:
remainingSize -= PCP_MAP_V2_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printMAPOpcodeVersion2(req);
#endif /* DEBUG */
parsePCPMAP_version2(req, pcp_msg_info);
req += PCP_MAP_V2_SIZE;
parsePCPOptions(req, remainingSize, pcp_msg_info);
if (ValidatePCPMsg(pcp_msg_info)) {
if (pcp_msg_info->lifetime == 0) {
DeletePCPMap(pcp_msg_info);
} else {
CreatePCPMap(pcp_msg_info);
}
} else {
syslog(LOG_ERR, "PCP: Invalid PCP v2 MAP message.");
return pcp_msg_info->result_code;
}
break;
#ifdef PCP_PEER
case PCP_OPCODE_PEER:
remainingSize -= PCP_PEER_V2_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printPEEROpcodeVersion2(req);
#endif /* DEBUG */
parsePCPPEER_version2(req, pcp_msg_info);
req += PCP_PEER_V2_SIZE;
if (pcp_msg_info->result_code != 0) {
return pcp_msg_info->result_code;
}
parsePCPOptions(req, remainingSize, pcp_msg_info);
if (ValidatePCPMsg(pcp_msg_info)) {
if (pcp_msg_info->lifetime == 0) {
DeletePCPPeer(pcp_msg_info);
} else {
CreatePCPPeer(pcp_msg_info);
}
} else {
syslog(LOG_ERR, "PCP: Invalid PCP v2 PEER message.");
}
break;
#endif /* PCP_PEER */
#ifdef PCP_SADSCP
case PCP_OPCODE_SADSCP:
remainingSize -= PCP_SADSCP_REQ_SIZE;
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_REQUEST;
return pcp_msg_info->result_code;
}
/* the variable-length app name must also fit in the packet */
remainingSize -= ((uint8_t *)req)[13]; /* app_name_length */
if (remainingSize < 0) {
pcp_msg_info->result_code = PCP_ERR_MALFORMED_OPTION;
return pcp_msg_info->result_code;
}
#ifdef DEBUG
printSADSCPOpcode(req);
#endif
parseSADSCP(req, pcp_msg_info);
req += PCP_SADSCP_REQ_SIZE;
if (pcp_msg_info->result_code != 0) {
return pcp_msg_info->result_code;
}
req += pcp_msg_info->app_name_len;
get_dscp_value(pcp_msg_info);
break;
#endif
default:
pcp_msg_info->result_code = PCP_ERR_UNSUPP_OPCODE;
break;
}
} else {
pcp_msg_info->result_code = PCP_ERR_UNSUPP_VERSION;
return pcp_msg_info->result_code;
}
return 1;
}
/* Fill in the PCP response header in-place (the request buffer is
 * reused as the response buffer) plus the opcode-specific fields for
 * MAP / PEER / SADSCP responses.
 * Header layout written here: [0] version, [1] r_opcode (request
 * opcode with the high bit set), [2] reserved, [3] result code,
 * [4..7] lifetime, [8..11] epoch time, [12..23] reserved. */
static void createPCPResponse(unsigned char *response, const pcp_info_t *pcp_msg_info)
{
response[2] = 0; /* reserved */
memset(response + 12, 0, 12); /* reserved */
if (pcp_msg_info->result_code == PCP_ERR_UNSUPP_VERSION ) {
/* highest supported version */
response[0] = this_server_info.server_version;
} else {
response[0] = pcp_msg_info->version;
}
response[1] = pcp_msg_info->opcode | 0x80; /* r_opcode */
response[3] = pcp_msg_info->result_code;
if(epoch_origin == 0) {
epoch_origin = startup_time;
}
WRITENU32(response + 8, upnp_time() - epoch_origin); /* epochtime */
/* lifetime in the response depends on the error class */
switch (pcp_msg_info->result_code) {
/*long lifetime errors*/
case PCP_ERR_UNSUPP_VERSION:
case PCP_ERR_NOT_AUTHORIZED:
case PCP_ERR_MALFORMED_REQUEST:
case PCP_ERR_UNSUPP_OPCODE:
case PCP_ERR_UNSUPP_OPTION:
case PCP_ERR_MALFORMED_OPTION:
case PCP_ERR_UNSUPP_PROTOCOL:
case PCP_ERR_ADDRESS_MISMATCH:
case PCP_ERR_CANNOT_PROVIDE_EXTERNAL:
case PCP_ERR_EXCESSIVE_REMOTE_PEERS:
WRITENU32(response + 4, 0); /* lifetime */
break;
/* transient errors: tell the client to retry after 30s */
case PCP_ERR_NETWORK_FAILURE:
case PCP_ERR_NO_RESOURCES:
case PCP_ERR_USER_EX_QUOTA:
WRITENU32(response + 4, 30); /* lifetime */
break;
case PCP_SUCCESS:
default:
WRITENU32(response + 4, pcp_msg_info->lifetime); /* lifetime */
break;
}
if (response[1] == 0x81) { /* MAP response */
if (response[0] == 1) { /* version */
/* v1 MAP layout: int port, ext port, ext address */
WRITENU16(response + PCP_COMMON_RESPONSE_SIZE + 4, pcp_msg_info->int_port);
WRITENU16(response + PCP_COMMON_RESPONSE_SIZE + 6, pcp_msg_info->ext_port);
copyIPv6IfDifferent(response + PCP_COMMON_RESPONSE_SIZE + 8,
pcp_msg_info->ext_ip);
}
else if (response[0] == 2) {
/* v2 MAP layout: 12-byte nonce precedes the port fields */
WRITENU16(response + PCP_COMMON_RESPONSE_SIZE + 16, pcp_msg_info->int_port);
WRITENU16(response + PCP_COMMON_RESPONSE_SIZE + 18, pcp_msg_info->ext_port);
copyIPv6IfDifferent(response + PCP_COMMON_RESPONSE_SIZE + 20,
pcp_msg_info->ext_ip);
}
}
#ifdef PCP_PEER
else if (response[1] == 0x82) { /* PEER response */
if (response[0] == 1) {
WRITENU16(response + PCP_COMMON_RESPONSE_SIZE + 4, pcp_msg_info->int_port);
WRITENU16(response + PCP_COMMON_RESPONSE_SIZE + 6, pcp_msg_info->ext_port);
WRITENU16(response + PCP_COMMON_RESPONSE_SIZE + 24, pcp_msg_info->peer_port);
copyIPv6IfDifferent(response + PCP_COMMON_RESPONSE_SIZE + 8,
pcp_msg_info->ext_ip);
}
else if (response[0] == 2) {
WRITENU16(response + PCP_COMMON_RESPONSE_SIZE + 16, pcp_msg_info->int_port);
WRITENU16(response + PCP_COMMON_RESPONSE_SIZE + 18, pcp_msg_info->ext_port);
WRITENU16(response + PCP_COMMON_RESPONSE_SIZE + 36, pcp_msg_info->peer_port);
copyIPv6IfDifferent(response + PCP_COMMON_RESPONSE_SIZE + 20,
pcp_msg_info->ext_ip);
}
}
#endif /* PCP_PEER */
#ifdef PCP_SADSCP
else if (response[1] == 0x83) { /*SADSCP response*/
/* bit 7: matched-name flag; low 6 bits: the DSCP value */
response[PCP_COMMON_RESPONSE_SIZE + 12]
= ((pcp_msg_info->matched_name<<7) & ~(1<<6)) |
(pcp_msg_info->sadscp_dscp & PCP_SADSCP_MASK);
memset(response + PCP_COMMON_RESPONSE_SIZE + 13, 0, 3);
}
#endif /* PCP_SADSCP */
}
/* Entry point for one received PCP datagram.
 * Parses the request in buff/len; when a response is required it is
 * built in place (reusing buff) and sent back to senderaddr.
 * Always returns 0; failures are only logged. */
int ProcessIncomingPCPPacket(int s, unsigned char *buff, int len,
                             const struct sockaddr *senderaddr,
                             const struct sockaddr_in6 *receiveraddr)
{
	pcp_info_t pcp_msg_info;
	struct lan_addr_s * lan_addr;
	char addr_str[64];

	/* buff[1] (the r_opcode byte) is inspected below, so a datagram
	 * shorter than 2 bytes must be dropped here to avoid reading past
	 * the end of the buffer. */
	if (len < 2)
		return 0;

	memset(&pcp_msg_info, 0, sizeof(pcp_info_t));

	/* record the sender as an IPv6 address; IPv4 senders are stored
	 * as IPv4-mapped addresses (::ffff:a.b.c.d) */
	if(senderaddr->sa_family == AF_INET) {
		const struct sockaddr_in * senderaddr_v4 =
			(const struct sockaddr_in *)senderaddr;
		pcp_msg_info.sender_ip.s6_addr[11] = 0xff;
		pcp_msg_info.sender_ip.s6_addr[10] = 0xff;
		memcpy(pcp_msg_info.sender_ip.s6_addr+12,
		       &senderaddr_v4->sin_addr, 4);
	} else if(senderaddr->sa_family == AF_INET6) {
		const struct sockaddr_in6 * senderaddr_v6 =
			(const struct sockaddr_in6 *)senderaddr;
		pcp_msg_info.sender_ip = senderaddr_v6->sin6_addr;
	} else {
		syslog(LOG_WARNING, "unknown PCP packet sender address family %d",
		       senderaddr->sa_family);
		return 0;
	}

	/* keep addr_str defined even when sockaddr_to_string() fails:
	 * it is also used in the "not from a LAN" warning below */
	addr_str[0] = '\0';
	if(sockaddr_to_string(senderaddr, addr_str, sizeof(addr_str)))
		syslog(LOG_DEBUG, "PCP request received from %s %dbytes",
		       addr_str, len);

	if(buff[1] & 128) {
		/* discarding PCP responses silently */
		return 0;
	}

	/* If we're in allow third party-mode, we probably don't care
	 * about locality either. Let's hope firewall is ok. */
	if (!GETFLAG(PCP_ALLOWTHIRDPARTYMASK)) {
		lan_addr = get_lan_for_peer(senderaddr);
		if(lan_addr == NULL) {
			syslog(LOG_WARNING, "PCP packet sender %s not from a LAN, ignoring",
			       addr_str);
			return 0;
		}
	}

	if (processPCPRequest(buff, len, &pcp_msg_info) ) {
		createPCPResponse(buff, &pcp_msg_info);

		/* pad the response to PCP_MIN_LEN / a multiple of 4 */
		if(len < PCP_MIN_LEN)
			len = PCP_MIN_LEN;
		else
			len = (len + 3) & ~3; /* round up resp. length to multiple of 4 */

		len = sendto_or_schedule2(s, buff, len, 0, senderaddr,
		                          (senderaddr->sa_family == AF_INET) ?
		                          sizeof(struct sockaddr_in) :
		                          sizeof(struct sockaddr_in6),
		                          receiveraddr);
		if( len < 0 ) {
			syslog(LOG_ERR, "sendto(pcpserver): %m");
		}
	}
	return 0;
}
#ifdef ENABLE_IPV6
/* Create, configure and bind the IPv6 UDP socket used for PCP
 * (port NATPMP_PORT on ipv6_bind_addr). The socket is made IPv6-only,
 * non-blocking, with SO_REUSEADDR and IPV6_RECVPKTINFO enabled.
 * Returns the socket descriptor, or -1 on socket()/bind() failure
 * (setsockopt failures are only logged as warnings). */
int OpenAndConfPCPv6Socket(void)
{
int s;
int i = 1;
struct sockaddr_in6 addr;
s = socket(PF_INET6, SOCK_DGRAM, 0/*IPPROTO_UDP*/);
if(s < 0) {
syslog(LOG_ERR, "%s: socket(): %m", "OpenAndConfPCPv6Socket");
return -1;
}
if(setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &i, sizeof(i)) < 0) {
syslog(LOG_WARNING, "%s: setsockopt(SO_REUSEADDR): %m",
"OpenAndConfPCPv6Socket");
}
#ifdef IPV6_V6ONLY
/* force IPV6 only for IPV6 socket.
* see http://www.ietf.org/rfc/rfc3493.txt section 5.3 */
if(setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, &i, sizeof(i)) < 0) {
syslog(LOG_WARNING, "%s: setsockopt(IPV6_V6ONLY): %m",
"OpenAndConfPCPv6Socket");
}
#endif
#ifdef IPV6_RECVPKTINFO
/* see RFC3542 */
if(setsockopt(s, IPPROTO_IPV6, IPV6_RECVPKTINFO, &i, sizeof(i)) < 0) {
syslog(LOG_WARNING, "%s: setsockopt(IPV6_RECVPKTINFO): %m",
"OpenAndConfPCPv6Socket");
}
#endif
if(!set_non_blocking(s)) {
syslog(LOG_WARNING, "%s: set_non_blocking(): %m",
"OpenAndConfPCPv6Socket");
}
memset(&addr, 0, sizeof(addr));
addr.sin6_family = AF_INET6;
addr.sin6_port = htons(NATPMP_PORT);
addr.sin6_addr = ipv6_bind_addr;
if(bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
syslog(LOG_ERR, "%s: bind(): %m", "OpenAndConfPCPv6Socket");
close(s);
return -1;
}
return s;
}
#endif /*ENABLE_IPV6*/
#ifdef ENABLE_IPV6
void PCPSendUnsolicitedAnnounce(int * sockets, int n_sockets, int socket6)
#else /* IPv4 only */
void PCPSendUnsolicitedAnnounce(int * sockets, int n_sockets)
#endif
/* Multicast an unsolicited PCP ANNOUNCE response on every given IPv4
 * socket (and the IPv6 socket when built with ENABLE_IPV6), e.g. after
 * a server restart or an external address change. Send errors are
 * only logged. */
{
	int i;
	unsigned char buff[PCP_MIN_LEN];
	pcp_info_t info;
	ssize_t len;
	struct sockaddr_in addr;
#ifdef ENABLE_IPV6
	struct sockaddr_in6 addr6;
#endif /* ENABLE_IPV6 */

	/* this is an Unsolicited ANNOUNCE response.
	 * Zero the whole structure first: createPCPResponse() receives a
	 * pointer to it and must not see indeterminate fields. */
	memset(&info, 0, sizeof(info));
	info.version = this_server_info.server_version;
	info.opcode = PCP_OPCODE_ANNOUNCE;
	info.result_code = PCP_SUCCESS;
	info.lifetime = 0;
	createPCPResponse(buff, &info);

	/* Multicast PCP restart announcements are sent to
	 * 224.0.0.1:5350 and/or [ff02::1]:5350 */
	memset(&addr, 0, sizeof(struct sockaddr_in));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = inet_addr("224.0.0.1");
	addr.sin_port = htons(5350);
	for(i = 0; i < n_sockets; i++) {
		len = sendto_or_schedule(sockets[i], buff, PCP_MIN_LEN, 0,
		                         (struct sockaddr *)&addr, sizeof(struct sockaddr_in));
		if( len < 0 ) {
			syslog(LOG_ERR, "PCPSendUnsolicitedAnnounce() sendto(): %m");
		}
	}
#ifdef ENABLE_IPV6
	memset(&addr6, 0, sizeof(struct sockaddr_in6));
	addr6.sin6_family = AF_INET6;
	inet_pton(AF_INET6, "FF02::1", &(addr6.sin6_addr));
	addr6.sin6_port = htons(5350);
	len = sendto_or_schedule(socket6, buff, PCP_MIN_LEN, 0,
	                         (struct sockaddr *)&addr6, sizeof(struct sockaddr_in6));
	if( len < 0 ) {
		syslog(LOG_ERR, "PCPSendUnsolicitedAnnounce() IPv6 sendto(): %m");
	}
#endif /* ENABLE_IPV6 */
}
/* Called when the external IP address managed by this PCP server has
 * changed: reset the epoch and notify clients so they can recreate
 * their mappings. */
#ifdef ENABLE_IPV6
void PCPPublicAddressChanged(int * sockets, int n_sockets, int socket6)
#else /* IPv4 only */
void PCPPublicAddressChanged(int * sockets, int n_sockets)
#endif
{
/* according to RFC 6887 8.5 :
* if the external IP address(es) of the NAT (controlled by
* the PCP server) changes, the Epoch time MUST be reset. */
epoch_origin = upnp_time();
#ifdef ENABLE_IPV6
PCPSendUnsolicitedAnnounce(sockets, n_sockets, socket6);
#else /* IPv4 Only */
PCPSendUnsolicitedAnnounce(sockets, n_sockets);
#endif
}
#endif /*ENABLE_PCP*/
/************************************************************
* Copyright (c) 1994 by Silicon Graphics Computer Systems, Inc.
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of Silicon Graphics not be
* used in advertising or publicity pertaining to distribution
* of the software without specific prior written permission.
* Silicon Graphics makes no representation about the suitability
* of this software for any purpose. It is provided "as is"
* without any express or implied warranty.
*
* SILICON GRAPHICS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON
* GRAPHICS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH
* THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
********************************************************/
#include "xkbcomp-priv.h"
#include "text.h"
#include "expr.h"
/* Callback used by the generic resolvers below: translate the
 * identifier atom `field`, requested as expression type `type`, into
 * an unsigned value stored in *val_rtrn. `priv` carries
 * resolver-specific state (e.g. a lookup table). Returns false when
 * the identifier is not known. */
typedef bool (*IdentLookupFunc)(struct xkb_context *ctx, const void *priv,
xkb_atom_t field, enum expr_value_type type,
unsigned int *val_rtrn);
/* Decompose the left-hand side of an assignment/reference expression
 * into element name, field name and optional array-index expression:
 *   "name"          -> (NULL,   "name",  NULL)
 *   "elem.field"    -> ("elem", "field", NULL)
 *   "elem.field[i]" -> ("elem", "field", i)
 * Returns false (logging an internal error) for any other operator.
 * NOTE(review): the returned strings come from xkb_atom_text() and
 * are not checked for NULL here -- confirm callers tolerate that. */
bool
ExprResolveLhs(struct xkb_context *ctx, const ExprDef *expr,
const char **elem_rtrn, const char **field_rtrn,
ExprDef **index_rtrn)
{
switch (expr->expr.op) {
case EXPR_IDENT:
*elem_rtrn = NULL;
*field_rtrn = xkb_atom_text(ctx, expr->ident.ident);
*index_rtrn = NULL;
return true;
case EXPR_FIELD_REF:
*elem_rtrn = xkb_atom_text(ctx, expr->field_ref.element);
*field_rtrn = xkb_atom_text(ctx, expr->field_ref.field);
*index_rtrn = NULL;
return true;
case EXPR_ARRAY_REF:
*elem_rtrn = xkb_atom_text(ctx, expr->array_ref.element);
*field_rtrn = xkb_atom_text(ctx, expr->array_ref.field);
*index_rtrn = expr->array_ref.entry;
return true;
default:
break;
}
log_wsgo(ctx, "Unexpected operator %d in ResolveLhs\n", expr->expr.op);
return false;
}
/* IdentLookupFunc resolving an identifier against a NULL-name
 * terminated LookupEntry table passed via priv. Only integer-typed
 * lookups are supported; on a case-insensitive name match the entry's
 * value is stored in *val_rtrn. */
static bool
SimpleLookup(struct xkb_context *ctx, const void *priv, xkb_atom_t field,
             enum expr_value_type type, unsigned int *val_rtrn)
{
    const LookupEntry *cur = priv;
    const char *name;

    if (type != EXPR_TYPE_INT || field == XKB_ATOM_NONE || !cur)
        return false;

    name = xkb_atom_text(ctx, field);
    while (cur->name) {
        if (istreq(name, cur->name)) {
            *val_rtrn = cur->value;
            return true;
        }
        cur++;
    }

    return false;
}
/* Data passed in the *priv argument for LookupModMask. */
typedef struct {
const struct xkb_mod_set *mods; /* modifier set the names are resolved against */
enum mod_type mod_type; /* mod kinds accepted; forwarded to XkbModNameToIndex() */
} LookupModMaskPriv;
/* IdentLookupFunc-style resolver turning a modifier name into a
 * modifier mask. Special names: "all" yields the full real modifier
 * mask, "none" yields 0; anything else is looked up in arg->mods
 * (filtered by arg->mod_type) and converted to a single-bit mask. */
static bool
LookupModMask(struct xkb_context *ctx, const void *priv, xkb_atom_t field,
enum expr_value_type type, xkb_mod_mask_t *val_rtrn)
{
const char *str;
xkb_mod_index_t ndx;
const LookupModMaskPriv *arg = priv;
const struct xkb_mod_set *mods = arg->mods;
enum mod_type mod_type = arg->mod_type;
if (type != EXPR_TYPE_INT)
return false;
str = xkb_atom_text(ctx, field);
if (istreq(str, "all")) {
*val_rtrn = MOD_REAL_MASK_ALL;
return true;
}
if (istreq(str, "none")) {
*val_rtrn = 0;
return true;
}
ndx = XkbModNameToIndex(mods, field, mod_type);
if (ndx == XKB_MOD_INVALID)
return false;
*val_rtrn = (1u << ndx);
return true;
}
/*
 * Resolve an expression to a boolean value stored in *set_rtrn.
 *
 * Accepts boolean constants, the identifiers true/yes/on and
 * false/no/off, and logical negation (! or ~) of a boolean
 * sub-expression. Arithmetic operators, assignment and field
 * references are rejected with an error. Returns true on success.
 */
bool
ExprResolveBoolean(struct xkb_context *ctx, const ExprDef *expr,
                   bool *set_rtrn)
{
    bool ok = false;
    const char *ident;

    switch (expr->expr.op) {
    case EXPR_VALUE:
        if (expr->expr.value_type != EXPR_TYPE_BOOLEAN) {
            log_err(ctx,
                    "Found constant of type %s where boolean was expected\n",
                    expr_value_type_to_string(expr->expr.value_type));
            return false;
        }
        *set_rtrn = expr->boolean.set;
        return true;

    case EXPR_IDENT:
        ident = xkb_atom_text(ctx, expr->ident.ident);
        if (ident) {
            if (istreq(ident, "true") ||
                istreq(ident, "yes") ||
                istreq(ident, "on")) {
                *set_rtrn = true;
                return true;
            }
            else if (istreq(ident, "false") ||
                     istreq(ident, "no") ||
                     istreq(ident, "off")) {
                *set_rtrn = false;
                return true;
            }
        }
        /* ident may be NULL here (the guard above anticipates a failed
         * atom lookup); passing NULL to a printf-style %s is undefined
         * behavior, so substitute a placeholder. */
        log_err(ctx, "Identifier \"%s\" of type boolean is unknown\n",
                ident ? ident : "(null)");
        return false;

    case EXPR_FIELD_REF:
        log_err(ctx, "Default \"%s.%s\" of type boolean is unknown\n",
                xkb_atom_text(ctx, expr->field_ref.element),
                xkb_atom_text(ctx, expr->field_ref.field));
        return false;

    case EXPR_INVERT:
    case EXPR_NOT:
        /* logical negation: resolve the child, then flip the result */
        ok = ExprResolveBoolean(ctx, expr->unary.child, set_rtrn);
        if (ok)
            *set_rtrn = !*set_rtrn;
        return ok;

    case EXPR_ADD:
    case EXPR_SUBTRACT:
    case EXPR_MULTIPLY:
    case EXPR_DIVIDE:
    case EXPR_ASSIGN:
    case EXPR_NEGATE:
    case EXPR_UNARY_PLUS:
        log_err(ctx, "%s of boolean values not permitted\n",
                expr_op_type_to_string(expr->expr.op));
        break;

    default:
        log_wsgo(ctx, "Unknown operator %d in ResolveBoolean\n",
                 expr->expr.op);
        break;
    }

    return false;
}
/* Evaluate an expression to a keycode value (*kc).
 * Supports integer constants, +, -, *, / (division by zero rejected),
 * unary plus, and negation.
 * NOTE(review): EXPR_NEGATE is implemented as bitwise NOT (~), not
 * arithmetic negation, and the arithmetic wraps as unsigned --
 * presumably deliberate for keycodes, but worth confirming. */
bool
ExprResolveKeyCode(struct xkb_context *ctx, const ExprDef *expr,
xkb_keycode_t *kc)
{
xkb_keycode_t leftRtrn, rightRtrn;
switch (expr->expr.op) {
case EXPR_VALUE:
if (expr->expr.value_type != EXPR_TYPE_INT) {
log_err(ctx,
"Found constant of type %s where an int was expected\n",
expr_value_type_to_string(expr->expr.value_type));
return false;
}
*kc = (xkb_keycode_t) expr->integer.ival;
return true;
case EXPR_ADD:
case EXPR_SUBTRACT:
case EXPR_MULTIPLY:
case EXPR_DIVIDE:
/* evaluate both operands recursively, then combine */
if (!ExprResolveKeyCode(ctx, expr->binary.left, &leftRtrn) ||
!ExprResolveKeyCode(ctx, expr->binary.right, &rightRtrn))
return false;
switch (expr->expr.op) {
case EXPR_ADD:
*kc = leftRtrn + rightRtrn;
break;
case EXPR_SUBTRACT:
*kc = leftRtrn - rightRtrn;
break;
case EXPR_MULTIPLY:
*kc = leftRtrn * rightRtrn;
break;
case EXPR_DIVIDE:
if (rightRtrn == 0) {
log_err(ctx, "Cannot divide by zero: %d / %d\n",
leftRtrn, rightRtrn);
return false;
}
*kc = leftRtrn / rightRtrn;
break;
default:
break;
}
return true;
case EXPR_NEGATE:
if (!ExprResolveKeyCode(ctx, expr->unary.child, &leftRtrn))
return false;
*kc = ~leftRtrn;
return true;
case EXPR_UNARY_PLUS:
return ExprResolveKeyCode(ctx, expr->unary.child, kc);
default:
log_wsgo(ctx, "Unknown operator %d in ResolveKeyCode\n",
expr->expr.op);
break;
}
return false;
}
/**
 * Resolve an expression to a signed integer.
 *
 * Integer literals resolve directly.  If @lookup is non-NULL, EXPR_IDENT
 * nodes are resolved through it (the lookup reports its result through an
 * unsigned out-parameter, which is then cast back to int); with a NULL
 * @lookup, identifiers fail.  Field references are always errors.  The
 * binary +, -, *, / and unary -, ~, + operators recurse into this
 * function.
 *
 * Returns true with the result in *val_rtrn, false (with an error logged)
 * otherwise.
 */
static bool
ExprResolveIntegerLookup(struct xkb_context *ctx, const ExprDef *expr,
                         int *val_rtrn, IdentLookupFunc lookup,
                         const void *lookupPriv)
{
    bool ok = false;
    int l, r;
    unsigned u;
    ExprDef *left, *right;
    switch (expr->expr.op) {
    case EXPR_VALUE:
        if (expr->expr.value_type != EXPR_TYPE_INT) {
            log_err(ctx,
                    "Found constant of type %s where an int was expected\n",
                    expr_value_type_to_string(expr->expr.value_type));
            return false;
        }
        *val_rtrn = expr->integer.ival;
        return true;
    case EXPR_IDENT:
        /* ok stays false when no lookup function was supplied. */
        if (lookup)
            ok = lookup(ctx, lookupPriv, expr->ident.ident, EXPR_TYPE_INT, &u);
        if (!ok)
            log_err(ctx, "Identifier \"%s\" of type int is unknown\n",
                    xkb_atom_text(ctx, expr->ident.ident));
        else
            *val_rtrn = (int) u;
        return ok;
    case EXPR_FIELD_REF:
        log_err(ctx, "Default \"%s.%s\" of type int is unknown\n",
                xkb_atom_text(ctx, expr->field_ref.element),
                xkb_atom_text(ctx, expr->field_ref.field));
        return false;
    case EXPR_ADD:
    case EXPR_SUBTRACT:
    case EXPR_MULTIPLY:
    case EXPR_DIVIDE:
        /* Resolve both operands recursively with the same lookup. */
        left = expr->binary.left;
        right = expr->binary.right;
        if (!ExprResolveIntegerLookup(ctx, left, &l, lookup, lookupPriv) ||
            !ExprResolveIntegerLookup(ctx, right, &r, lookup, lookupPriv))
            return false;
        switch (expr->expr.op) {
        case EXPR_ADD:
            *val_rtrn = l + r;
            break;
        case EXPR_SUBTRACT:
            *val_rtrn = l - r;
            break;
        case EXPR_MULTIPLY:
            *val_rtrn = l * r;
            break;
        case EXPR_DIVIDE:
            /* Division by zero is rejected explicitly. */
            if (r == 0) {
                log_err(ctx, "Cannot divide by zero: %d / %d\n", l, r);
                return false;
            }
            *val_rtrn = l / r;
            break;
        default:
            /* Unreachable: outer case list restricts the operators. */
            log_err(ctx, "%s of integers not permitted\n",
                    expr_op_type_to_string(expr->expr.op));
            return false;
        }
        return true;
    case EXPR_ASSIGN:
        log_wsgo(ctx, "Assignment operator not implemented yet\n");
        break;
    case EXPR_NOT:
        log_err(ctx, "The ! operator cannot be applied to an integer\n");
        return false;
    case EXPR_INVERT:
    case EXPR_NEGATE:
        /* Unary minus (-) and bitwise complement (~). */
        left = expr->unary.child;
        if (!ExprResolveIntegerLookup(ctx, left, &l, lookup, lookupPriv))
            return false;
        *val_rtrn = (expr->expr.op == EXPR_NEGATE ? -l : ~l);
        return true;
    case EXPR_UNARY_PLUS:
        /* Unary plus is the identity operation. */
        left = expr->unary.child;
        return ExprResolveIntegerLookup(ctx, left, val_rtrn, lookup,
                                        lookupPriv);
    default:
        log_wsgo(ctx, "Unknown operator %d in ResolveInteger\n",
                 expr->expr.op);
        break;
    }
    return false;
}
/*
 * Resolve an expression to an integer without any identifier lookup
 * support: bare identifiers in the expression will fail to resolve.
 */
bool
ExprResolveInteger(struct xkb_context *ctx, const ExprDef *expr,
                   int *val_rtrn)
{
    /* No lookup function and no lookup context. */
    return ExprResolveIntegerLookup(ctx, expr, val_rtrn, NULL, NULL);
}
/*
 * Resolve an expression to a group index in the range 1..XKB_MAX_GROUPS.
 * Symbolic group names ("Group1", etc.) are resolved via groupNames.
 *
 * Returns true with the (1-based) index in *group_rtrn, false otherwise.
 */
bool
ExprResolveGroup(struct xkb_context *ctx, const ExprDef *expr,
                 xkb_layout_index_t *group_rtrn)
{
    bool ok;
    int result;
    ok = ExprResolveIntegerLookup(ctx, expr, &result, SimpleLookup,
                                  groupNames);
    if (!ok)
        return false;
    if (result <= 0 || result > XKB_MAX_GROUPS) {
        /* Print with %d: result is a signed int and is <= 0 on one arm of
         * this check, so the previous %u specifier printed garbage for
         * negative values (and mismatched the argument type). */
        log_err(ctx, "Group index %d is out of range (1..%d)\n",
                result, XKB_MAX_GROUPS);
        return false;
    }
    *group_rtrn = (xkb_layout_index_t) result;
    return true;
}
/*
 * Resolve an expression to a shift level.  Symbolic level names are
 * resolved via levelNames; the result is converted from the 1-based
 * source form to the 0-based index used internally.
 */
bool
ExprResolveLevel(struct xkb_context *ctx, const ExprDef *expr,
                 xkb_level_index_t *level_rtrn)
{
    int level;
    if (!ExprResolveIntegerLookup(ctx, expr, &level, SimpleLookup,
                                  levelNames))
        return false;
    if (level < 1) {
        log_err(ctx, "Shift level %d is out of range\n", level);
        return false;
    }
    /* Level is zero-indexed from now on. */
    *level_rtrn = (unsigned int) (level - 1);
    return true;
}
bool
ExprResolveButton(struct xkb_context *ctx, const ExprDef *expr, int *btn_rtrn)
{
return ExprResolveIntegerLookup(ctx, expr, btn_rtrn, SimpleLookup,
buttonNames);
}
/*
 * Resolve an expression to a string atom.
 *
 * Only literal string constants resolve; identifiers, field references
 * and every operator are errors (strings cannot be combined).
 */
bool
ExprResolveString(struct xkb_context *ctx, const ExprDef *expr,
                  xkb_atom_t *val_rtrn)
{
    /* Literal constant: must actually be of string type. */
    if (expr->expr.op == EXPR_VALUE) {
        if (expr->expr.value_type != EXPR_TYPE_STRING) {
            log_err(ctx, "Found constant of type %s, expected a string\n",
                    expr_value_type_to_string(expr->expr.value_type));
            return false;
        }
        *val_rtrn = expr->string.str;
        return true;
    }
    if (expr->expr.op == EXPR_IDENT) {
        log_err(ctx, "Identifier \"%s\" of type string not found\n",
                xkb_atom_text(ctx, expr->ident.ident));
        return false;
    }
    if (expr->expr.op == EXPR_FIELD_REF) {
        log_err(ctx, "Default \"%s.%s\" of type string not found\n",
                xkb_atom_text(ctx, expr->field_ref.element),
                xkb_atom_text(ctx, expr->field_ref.field));
        return false;
    }
    /* All operators are illegal on strings. */
    if (expr->expr.op == EXPR_ADD || expr->expr.op == EXPR_SUBTRACT ||
        expr->expr.op == EXPR_MULTIPLY || expr->expr.op == EXPR_DIVIDE ||
        expr->expr.op == EXPR_ASSIGN || expr->expr.op == EXPR_NEGATE ||
        expr->expr.op == EXPR_INVERT || expr->expr.op == EXPR_NOT ||
        expr->expr.op == EXPR_UNARY_PLUS) {
        log_err(ctx, "%s of strings not permitted\n",
                expr_op_type_to_string(expr->expr.op));
        return false;
    }
    log_wsgo(ctx, "Unknown operator %d in ResolveString\n",
             expr->expr.op);
    return false;
}
/*
 * Resolve an identifier against a fixed table of enumerated values.
 * On an unknown identifier, the full list of acceptable names is logged
 * to help the user correct the keymap source.
 */
bool
ExprResolveEnum(struct xkb_context *ctx, const ExprDef *expr,
                unsigned int *val_rtrn, const LookupEntry *values)
{
    const LookupEntry *entry;
    if (expr->expr.op != EXPR_IDENT) {
        log_err(ctx, "Found a %s where an enumerated value was expected\n",
                expr_op_type_to_string(expr->expr.op));
        return false;
    }
    if (SimpleLookup(ctx, values, expr->ident.ident, EXPR_TYPE_INT,
                     val_rtrn))
        return true;
    log_err(ctx, "Illegal identifier %s; expected one of:\n",
            xkb_atom_text(ctx, expr->ident.ident));
    for (entry = values; entry && entry->name; entry++)
        log_err(ctx, "\t%s\n", entry->name);
    return false;
}
/*
 * Resolve an expression to a bitmask.
 *
 * Identifiers are mapped to mask bits through @lookup.  Note the
 * EXPR_IDENT case calls @lookup unconditionally (no NULL check); both
 * callers (ExprResolveMask and ExprResolveModMask) always supply one.
 * '+' means bitwise OR, '-' means bit clearing, and '~' inverts an
 * integer operand; multiplying or dividing masks is rejected.
 *
 * Returns true with the mask in *val_rtrn, false (with an error logged)
 * otherwise.
 */
static bool
ExprResolveMaskLookup(struct xkb_context *ctx, const ExprDef *expr,
                      unsigned int *val_rtrn, IdentLookupFunc lookup,
                      const void *lookupPriv)
{
    bool ok = false;
    unsigned int l = 0, r = 0;
    int v;
    ExprDef *left, *right;
    const char *bogus = NULL;
    switch (expr->expr.op) {
    case EXPR_VALUE:
        if (expr->expr.value_type != EXPR_TYPE_INT) {
            log_err(ctx,
                    "Found constant of type %s where a mask was expected\n",
                    expr_value_type_to_string(expr->expr.value_type));
            return false;
        }
        *val_rtrn = (unsigned int) expr->integer.ival;
        return true;
    case EXPR_IDENT:
        ok = lookup(ctx, lookupPriv, expr->ident.ident, EXPR_TYPE_INT,
                    val_rtrn);
        if (!ok)
            log_err(ctx, "Identifier \"%s\" of type int is unknown\n",
                    xkb_atom_text(ctx, expr->ident.ident));
        return ok;
    case EXPR_FIELD_REF:
        log_err(ctx, "Default \"%s.%s\" of type int is unknown\n",
                xkb_atom_text(ctx, expr->field_ref.element),
                xkb_atom_text(ctx, expr->field_ref.field));
        return false;
    case EXPR_ARRAY_REF:
        bogus = "array reference";
        /* fallthrough */
    case EXPR_ACTION_DECL:
        /* bogus still NULL here means we fell in directly, i.e. a
         * function-style action declaration. */
        if (bogus == NULL)
            bogus = "function use";
        log_err(ctx,
                "Unexpected %s in mask expression; Expression Ignored\n",
                bogus);
        return false;
    case EXPR_ADD:
    case EXPR_SUBTRACT:
    case EXPR_MULTIPLY:
    case EXPR_DIVIDE:
        /* Resolve both operands as masks first. */
        left = expr->binary.left;
        right = expr->binary.right;
        if (!ExprResolveMaskLookup(ctx, left, &l, lookup, lookupPriv) ||
            !ExprResolveMaskLookup(ctx, right, &r, lookup, lookupPriv))
            return false;
        switch (expr->expr.op) {
        case EXPR_ADD:
            /* Mask addition is set union. */
            *val_rtrn = l | r;
            break;
        case EXPR_SUBTRACT:
            /* Mask subtraction clears the right operand's bits. */
            *val_rtrn = l & (~r);
            break;
        case EXPR_MULTIPLY:
        case EXPR_DIVIDE:
            log_err(ctx, "Cannot %s masks; Illegal operation ignored\n",
                    (expr->expr.op == EXPR_DIVIDE ? "divide" : "multiply"));
            return false;
        default:
            /* Unreachable: outer case list restricts the operators. */
            break;
        }
        return true;
    case EXPR_ASSIGN:
        log_wsgo(ctx, "Assignment operator not implemented yet\n");
        break;
    case EXPR_INVERT:
        /* ~ resolves its operand as an integer, then complements it. */
        left = expr->unary.child;
        if (!ExprResolveIntegerLookup(ctx, left, &v, lookup, lookupPriv))
            return false;
        *val_rtrn = ~v;
        return true;
    case EXPR_UNARY_PLUS:
    case EXPR_NEGATE:
    case EXPR_NOT:
        left = expr->unary.child;
        /* NOTE(review): the "cannot be used with a mask" error is logged
         * only when the operand itself FAILS to resolve, yet this case
         * always returns false either way -- the condition looks inverted.
         * Confirm against upstream before changing. */
        if (!ExprResolveIntegerLookup(ctx, left, &v, lookup, lookupPriv))
            log_err(ctx, "The %s operator cannot be used with a mask\n",
                    (expr->expr.op == EXPR_NEGATE ? "-" : "!"));
        return false;
    default:
        log_wsgo(ctx, "Unknown operator %d in ResolveMask\n",
                 expr->expr.op);
        break;
    }
    return false;
}
/*
 * Resolve a mask expression, mapping identifiers through a simple
 * name/value table.
 */
bool
ExprResolveMask(struct xkb_context *ctx, const ExprDef *expr,
                unsigned int *mask_rtrn, const LookupEntry *values)
{
    const void *priv = values;
    return ExprResolveMaskLookup(ctx, expr, mask_rtrn, SimpleLookup, priv);
}
/*
 * Resolve a modifier-mask expression; modifier names are looked up in
 * @mods, restricted to modifiers of the given @mod_type.
 */
bool
ExprResolveModMask(struct xkb_context *ctx, const ExprDef *expr,
                   enum mod_type mod_type, const struct xkb_mod_set *mods,
                   xkb_mod_mask_t *mask_rtrn)
{
    LookupModMaskPriv priv = { .mod_type = mod_type, .mods = mods };
    return ExprResolveMaskLookup(ctx, expr, mask_rtrn, LookupModMask, &priv);
}
/*
 * Resolve an expression to a keysym.
 *
 * An identifier is first tried as a symbolic keysym name.  Failing that
 * (or for any non-identifier expression), a single decimal digit 0..9 is
 * accepted and mapped onto XKB_KEY_0..XKB_KEY_9.
 */
bool
ExprResolveKeySym(struct xkb_context *ctx, const ExprDef *expr,
                  xkb_keysym_t *sym_rtrn)
{
    int digit;
    if (expr->expr.op == EXPR_IDENT) {
        const char *name = xkb_atom_text(ctx, expr->ident.ident);
        *sym_rtrn = xkb_keysym_from_name(name, 0);
        if (*sym_rtrn != XKB_KEY_NoSymbol)
            return true;
    }
    /* Fall back to a one-digit integer. */
    if (!ExprResolveInteger(ctx, expr, &digit))
        return false;
    if (digit < 0 || digit >= 10)
        return false;
    *sym_rtrn = XKB_KEY_0 + (xkb_keysym_t) digit;
    return true;
}
/*
 * Resolve an expression to a modifier index.
 *
 * Only a bare identifier is accepted; it must name a modifier of the
 * requested @mod_type that was previously declared in @mods.
 */
bool
ExprResolveMod(struct xkb_context *ctx, const ExprDef *def,
               enum mod_type mod_type, const struct xkb_mod_set *mods,
               xkb_mod_index_t *ndx_rtrn)
{
    xkb_atom_t mod_name;
    xkb_mod_index_t idx;
    if (def->expr.op != EXPR_IDENT) {
        log_err(ctx,
                "Cannot resolve virtual modifier: "
                "found %s where a virtual modifier name was expected\n",
                expr_op_type_to_string(def->expr.op));
        return false;
    }
    mod_name = def->ident.ident;
    idx = XkbModNameToIndex(mods, mod_name, mod_type);
    if (idx == XKB_MOD_INVALID) {
        log_err(ctx,
                "Cannot resolve virtual modifier: "
                "\"%s\" was not previously declared\n",
                xkb_atom_text(ctx, mod_name));
        return false;
    }
    *ndx_rtrn = idx;
    return true;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_324_0 |
crossvul-cpp_data_bad_2852_0 | /* Userspace key control operations
*
* Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/fs.h>
#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/uio.h>
#include <linux/uaccess.h>
#include "internal.h"
#define KEY_MAX_DESC_SIZE 4096
/*
 * Copy a key type name in from userspace.
 *
 * Returns 0 with a NUL-terminated name in @type on success; -EINVAL if the
 * name is empty or does not fit (with terminator) in @len bytes; -EPERM if
 * it begins with a '.' (reserved for kernel-internal types); or the error
 * from the userspace copy.
 */
static int key_get_type_from_user(char *type,
				  const char __user *_type,
				  unsigned len)
{
	int n = strncpy_from_user(type, _type, len);
	if (n < 0)
		return n;
	/* empty name, or no room left for the terminator */
	if (n == 0 || n >= len)
		return -EINVAL;
	if (type[0] == '.')
		return -EPERM;
	type[len - 1] = '\0';
	return 0;
}
/*
 * Extract the description of a new key from userspace and either add it as a
 * new key to the specified keyring or update a matching key in that keyring.
 *
 * If the description is NULL or an empty string, the key type is asked to
 * generate one from the payload.
 *
 * The keyring must be writable so that we can attach the key to it.
 *
 * If successful, the new key's serial number is returned, otherwise an error
 * code is returned.
 */
SYSCALL_DEFINE5(add_key, const char __user *, _type,
		const char __user *, _description,
		const void __user *, _payload,
		size_t, plen,
		key_serial_t, ringid)
{
	key_ref_t keyring_ref, key_ref;
	char type[32], *description;
	void *payload;
	long ret;
	/* payloads are limited to just under 1MiB */
	ret = -EINVAL;
	if (plen > 1024 * 1024 - 1)
		goto error;
	/* draw all the data into kernel space */
	ret = key_get_type_from_user(type, _type, sizeof(type));
	if (ret < 0)
		goto error;
	description = NULL;
	if (_description) {
		description = strndup_user(_description, KEY_MAX_DESC_SIZE);
		if (IS_ERR(description)) {
			ret = PTR_ERR(description);
			goto error;
		}
		/* an empty description means "generate one from the payload" */
		if (!*description) {
			kfree(description);
			description = NULL;
		} else if ((description[0] == '.') &&
			   (strncmp(type, "keyring", 7) == 0)) {
			/* '.'-prefixed keyring names are reserved for the
			 * kernel's internal keyrings */
			ret = -EPERM;
			goto error2;
		}
	}
	/* pull the payload in if one was supplied */
	payload = NULL;
	if (_payload) {
		ret = -ENOMEM;
		payload = kvmalloc(plen, GFP_KERNEL);
		if (!payload)
			goto error2;
		ret = -EFAULT;
		if (copy_from_user(payload, _payload, plen) != 0)
			goto error3;
	}
	/* find the target keyring (which must be writable) */
	keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
	if (IS_ERR(keyring_ref)) {
		ret = PTR_ERR(keyring_ref);
		goto error3;
	}
	/* create or update the requested key and add it to the target
	 * keyring */
	key_ref = key_create_or_update(keyring_ref, type, description,
				       payload, plen, KEY_PERM_UNDEF,
				       KEY_ALLOC_IN_QUOTA);
	if (!IS_ERR(key_ref)) {
		ret = key_ref_to_ptr(key_ref)->serial;
		key_ref_put(key_ref);
	}
	else {
		ret = PTR_ERR(key_ref);
	}
	key_ref_put(keyring_ref);
	/* unwind in reverse order of acquisition; kvfree/kfree accept NULL */
 error3:
	kvfree(payload);
 error2:
	kfree(description);
 error:
	return ret;
}
/*
 * Search the process keyrings and keyring trees linked from those for a
 * matching key. Keyrings must have appropriate Search permission to be
 * searched.
 *
 * If a key is found, it will be attached to the destination keyring if there's
 * one specified and the serial number of the key will be returned.
 *
 * If no key is found, /sbin/request-key will be invoked if _callout_info is
 * non-NULL in an attempt to create a key. The _callout_info string will be
 * passed to /sbin/request-key to aid with completing the request. If the
 * _callout_info string is "" then it will be changed to "-".
 */
SYSCALL_DEFINE4(request_key, const char __user *, _type,
		const char __user *, _description,
		const char __user *, _callout_info,
		key_serial_t, destringid)
{
	struct key_type *ktype;
	struct key *key;
	key_ref_t dest_ref;
	size_t callout_len;
	char type[32], *description, *callout_info;
	long ret;
	/* pull the type into kernel space */
	ret = key_get_type_from_user(type, _type, sizeof(type));
	if (ret < 0)
		goto error;
	/* pull the description into kernel space */
	description = strndup_user(_description, KEY_MAX_DESC_SIZE);
	if (IS_ERR(description)) {
		ret = PTR_ERR(description);
		goto error;
	}
	/* pull the callout info into kernel space (capped at one page) */
	callout_info = NULL;
	callout_len = 0;
	if (_callout_info) {
		callout_info = strndup_user(_callout_info, PAGE_SIZE);
		if (IS_ERR(callout_info)) {
			ret = PTR_ERR(callout_info);
			goto error2;
		}
		callout_len = strlen(callout_info);
	}
	/* get the destination keyring if specified */
	dest_ref = NULL;
	if (destringid) {
		dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
					   KEY_NEED_WRITE);
		if (IS_ERR(dest_ref)) {
			ret = PTR_ERR(dest_ref);
			goto error3;
		}
	}
	/* find the key type */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		ret = PTR_ERR(ktype);
		goto error4;
	}
	/* do the search */
	key = request_key_and_link(ktype, description, callout_info,
				   callout_len, NULL, key_ref_to_ptr(dest_ref),
				   KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(key)) {
		ret = PTR_ERR(key);
		goto error5;
	}
	/* wait for the key to finish being constructed */
	ret = wait_for_key_construction(key, 1);
	if (ret < 0)
		goto error6;
	ret = key->serial;
	/* unwind in reverse order of acquisition */
error6:
	key_put(key);
error5:
	key_type_put(ktype);
error4:
	key_ref_put(dest_ref);
error3:
	kfree(callout_info);
error2:
	kfree(description);
error:
	return ret;
}
/*
 * Get the ID of the specified process keyring.
 *
 * The requested keyring must grant Search permission to be found.  When
 * @create is set, a missing special keyring is created on the fly.
 *
 * On success the keyring's serial number is returned.
 */
long keyctl_get_keyring_ID(key_serial_t id, int create)
{
	key_ref_t key_ref;
	long ret;
	key_ref = lookup_user_key(id, create ? KEY_LOOKUP_CREATE : 0,
				  KEY_NEED_SEARCH);
	if (IS_ERR(key_ref))
		return PTR_ERR(key_ref);
	ret = key_ref_to_ptr(key_ref)->serial;
	key_ref_put(key_ref);
	return ret;
}
/*
 * Join a (named) session keyring.
 *
 * With a NULL name, create and join an anonymous session keyring.  With a
 * name, join that session keyring, creating it if necessary; it must grant
 * Search permission (non-matching keyrings are skipped).  Names beginning
 * with a dot are reserved and may not be created or joined from userspace.
 *
 * On success the ID of the joined session keyring is returned.
 */
long keyctl_join_session_keyring(const char __user *_name)
{
	char *name = NULL;
	long ret;
	/* pull the keyring name (if any) in from userspace */
	if (_name) {
		name = strndup_user(_name, KEY_MAX_DESC_SIZE);
		if (IS_ERR(name))
			return PTR_ERR(name);
		/* '.'-prefixed names are reserved for the kernel */
		if (name[0] == '.') {
			ret = -EPERM;
			goto out;
		}
	}
	/* join the session */
	ret = join_session_keyring(name);
out:
	kfree(name);
	return ret;
}
/*
 * Update a key's data payload from the given data.
 *
 * The key must grant the caller Write permission and the key type must support
 * updating for this to work. A negative key can be positively instantiated
 * with this call.
 *
 * If successful, 0 will be returned. If the key type does not support
 * updating, then -EOPNOTSUPP will be returned.
 */
long keyctl_update_key(key_serial_t id,
		       const void __user *_payload,
		       size_t plen)
{
	key_ref_t key_ref;
	void *payload;
	long ret;
	/* update payloads are capped at one page */
	ret = -EINVAL;
	if (plen > PAGE_SIZE)
		goto error;
	/* pull the payload in if one was supplied */
	payload = NULL;
	if (_payload) {
		ret = -ENOMEM;
		payload = kmalloc(plen, GFP_KERNEL);
		if (!payload)
			goto error;
		ret = -EFAULT;
		if (copy_from_user(payload, _payload, plen) != 0)
			goto error2;
	}
	/* find the target key (which must be writable) */
	key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error2;
	}
	/* update the key */
	ret = key_update(key_ref, payload, plen);
	key_ref_put(key_ref);
	/* kfree(NULL) is a no-op, so the no-payload path is safe */
error2:
	kfree(payload);
error:
	return ret;
}
/*
 * Revoke a key.
 *
 * The key must be grant the caller Write or Setattr permission for this to
 * work. The key type should give up its quota claim when revoked. The key
 * and any links to the key will be automatically garbage collected after a
 * certain amount of time (/proc/sys/kernel/keys/gc_delay).
 *
 * Keys with KEY_FLAG_KEEP set should not be revoked.
 *
 * If successful, 0 is returned.
 */
long keyctl_revoke_key(key_serial_t id)
{
	key_ref_t key_ref;
	struct key *key;
	long ret;
	/* try Write permission first; fall back to Setattr on access denial */
	key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		if (ret != -EACCES)
			goto error;
		key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
		if (IS_ERR(key_ref)) {
			ret = PTR_ERR(key_ref);
			goto error;
		}
	}
	key = key_ref_to_ptr(key_ref);
	ret = 0;
	/* "keep" keys are immune to revocation */
	if (test_bit(KEY_FLAG_KEEP, &key->flags))
		ret = -EPERM;
	else
		key_revoke(key);
	key_ref_put(key_ref);
error:
	return ret;
}
/*
 * Invalidate a key.
 *
 * The key must be grant the caller Invalidate permission for this to work.
 * The key and any links to the key will be automatically garbage collected
 * immediately.
 *
 * Keys with KEY_FLAG_KEEP set should not be invalidated.
 *
 * If successful, 0 is returned.
 */
long keyctl_invalidate_key(key_serial_t id)
{
	key_ref_t key_ref;
	struct key *key;
	long ret;
	kenter("%d", id);
	key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		/* Root is permitted to invalidate certain special keys */
		if (capable(CAP_SYS_ADMIN)) {
			/* re-look-up without any permission requirement, then
			 * only proceed if the key explicitly opted in */
			key_ref = lookup_user_key(id, 0, 0);
			if (IS_ERR(key_ref))
				goto error;
			if (test_bit(KEY_FLAG_ROOT_CAN_INVAL,
				     &key_ref_to_ptr(key_ref)->flags))
				goto invalidate;
			goto error_put;
		}
		goto error;
	}
	/* the root-override branch above jumps straight in here */
invalidate:
	key = key_ref_to_ptr(key_ref);
	ret = 0;
	/* "keep" keys are immune to invalidation */
	if (test_bit(KEY_FLAG_KEEP, &key->flags))
		ret = -EPERM;
	else
		key_invalidate(key);
error_put:
	key_ref_put(key_ref);
error:
	kleave(" = %ld", ret);
	return ret;
}
/*
 * Clear the specified keyring, creating an empty process keyring if one of the
 * special keyring IDs is used.
 *
 * The keyring must grant the caller Write permission and not have
 * KEY_FLAG_KEEP set for this to work. If successful, 0 will be returned.
 */
long keyctl_keyring_clear(key_serial_t ringid)
{
	key_ref_t keyring_ref;
	struct key *keyring;
	long ret;
	keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
	if (IS_ERR(keyring_ref)) {
		ret = PTR_ERR(keyring_ref);
		/* Root is permitted to invalidate certain special keyrings */
		if (capable(CAP_SYS_ADMIN)) {
			/* re-look-up without any permission requirement, then
			 * only proceed if the keyring explicitly opted in */
			keyring_ref = lookup_user_key(ringid, 0, 0);
			if (IS_ERR(keyring_ref))
				goto error;
			if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR,
				     &key_ref_to_ptr(keyring_ref)->flags))
				goto clear;
			goto error_put;
		}
		goto error;
	}
	/* the root-override branch above jumps straight in here */
clear:
	keyring = key_ref_to_ptr(keyring_ref);
	/* "keep" keyrings are immune to clearing */
	if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
		ret = -EPERM;
	else
		ret = keyring_clear(keyring);
error_put:
	key_ref_put(keyring_ref);
error:
	return ret;
}
/*
 * Create a link from a keyring to a key if there's no matching key in the
 * keyring, otherwise replace the link to the matching key with a link to the
 * new key.
 *
 * The key must grant the caller Link permission and the keyring must grant
 * the caller Write permission. Furthermore, if an additional link is created,
 * the keyring's quota will be extended.
 *
 * If successful, 0 will be returned.
 */
long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
{
	key_ref_t keyring_ref, key_ref;
	long ret;
	/* the destination keyring must be writable (created if special ID) */
	keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
	if (IS_ERR(keyring_ref)) {
		ret = PTR_ERR(keyring_ref);
		goto error;
	}
	/* the key being linked must grant Link permission */
	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error2;
	}
	ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
	key_ref_put(key_ref);
error2:
	key_ref_put(keyring_ref);
error:
	return ret;
}
/*
 * Unlink a key from a keyring.
 *
 * The keyring must grant the caller Write permission for this to work; the key
 * itself need not grant the caller anything. If the last link to a key is
 * removed then that key will be scheduled for destruction.
 *
 * Keys or keyrings with KEY_FLAG_KEEP set should not be unlinked.
 *
 * If successful, 0 will be returned.
 */
long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
{
	key_ref_t keyring_ref, key_ref;
	struct key *keyring, *key;
	long ret;
	keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE);
	if (IS_ERR(keyring_ref)) {
		ret = PTR_ERR(keyring_ref);
		goto error;
	}
	key_ref = lookup_user_key(id, KEY_LOOKUP_FOR_UNLINK, 0);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error2;
	}
	keyring = key_ref_to_ptr(keyring_ref);
	key = key_ref_to_ptr(key_ref);
	/* refuse only when BOTH the keyring and the key are marked "keep" */
	if (test_bit(KEY_FLAG_KEEP, &keyring->flags) &&
	    test_bit(KEY_FLAG_KEEP, &key->flags))
		ret = -EPERM;
	else
		ret = key_unlink(keyring, key);
	key_ref_put(key_ref);
error2:
	key_ref_put(keyring_ref);
error:
	return ret;
}
/*
 * Return a description of a key to userspace.
 *
 * The key must grant the caller View permission for this to work.
 *
 * If there's a buffer, we place up to buflen bytes of data into it formatted
 * in the following way:
 *
 *	type;uid;gid;perm;description<NUL>
 *
 * If successful, we return the amount of description available, irrespective
 * of how much we may have copied into the buffer.
 */
long keyctl_describe_key(key_serial_t keyid,
			 char __user *buffer,
			 size_t buflen)
{
	struct key *key, *instkey;
	key_ref_t key_ref;
	char *infobuf;
	long ret;
	int desclen, infolen;
	key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
	if (IS_ERR(key_ref)) {
		/* viewing a key under construction is permitted if we have the
		 * authorisation token handy */
		if (PTR_ERR(key_ref) == -EACCES) {
			instkey = key_get_instantiation_authkey(keyid);
			if (!IS_ERR(instkey)) {
				/* only needed the authkey to prove authority;
				 * drop it and re-look-up without permission */
				key_put(instkey);
				key_ref = lookup_user_key(keyid,
							  KEY_LOOKUP_PARTIAL,
							  0);
				if (!IS_ERR(key_ref))
					goto okay;
			}
		}
		ret = PTR_ERR(key_ref);
		goto error;
	}
okay:
	key = key_ref_to_ptr(key_ref);
	desclen = strlen(key->description);
	/* calculate how much information we're going to return */
	ret = -ENOMEM;
	infobuf = kasprintf(GFP_KERNEL,
			    "%s;%d;%d;%08x;",
			    key->type->name,
			    from_kuid_munged(current_user_ns(), key->uid),
			    from_kgid_munged(current_user_ns(), key->gid),
			    key->perm);
	if (!infobuf)
		goto error2;
	infolen = strlen(infobuf);
	/* total = prefix + description + trailing NUL */
	ret = infolen + desclen + 1;
	/* consider returning the data; all-or-nothing: a too-small buffer
	 * gets nothing, but ret still reports the size needed */
	if (buffer && buflen >= ret) {
		if (copy_to_user(buffer, infobuf, infolen) != 0 ||
		    copy_to_user(buffer + infolen, key->description,
				 desclen + 1) != 0)
			ret = -EFAULT;
	}
	kfree(infobuf);
error2:
	key_ref_put(key_ref);
error:
	return ret;
}
/*
 * Search the specified keyring and any keyrings it links to for a matching
 * key. Only keyrings that grant the caller Search permission will be searched
 * (this includes the starting keyring). Only keys with Search permission can
 * be found.
 *
 * If successful, the found key will be linked to the destination keyring if
 * supplied and the key has Link permission, and the found key ID will be
 * returned.
 */
long keyctl_keyring_search(key_serial_t ringid,
			   const char __user *_type,
			   const char __user *_description,
			   key_serial_t destringid)
{
	struct key_type *ktype;
	key_ref_t keyring_ref, key_ref, dest_ref;
	char type[32], *description;
	long ret;
	/* pull the type and description into kernel space */
	ret = key_get_type_from_user(type, _type, sizeof(type));
	if (ret < 0)
		goto error;
	description = strndup_user(_description, KEY_MAX_DESC_SIZE);
	if (IS_ERR(description)) {
		ret = PTR_ERR(description);
		goto error;
	}
	/* get the keyring at which to begin the search */
	keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH);
	if (IS_ERR(keyring_ref)) {
		ret = PTR_ERR(keyring_ref);
		goto error2;
	}
	/* get the destination keyring if specified */
	dest_ref = NULL;
	if (destringid) {
		dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
					   KEY_NEED_WRITE);
		if (IS_ERR(dest_ref)) {
			ret = PTR_ERR(dest_ref);
			goto error3;
		}
	}
	/* find the key type */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		ret = PTR_ERR(ktype);
		goto error4;
	}
	/* do the search */
	key_ref = keyring_search(keyring_ref, ktype, description);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		/* treat lack or presence of a negative key the same */
		if (ret == -EAGAIN)
			ret = -ENOKEY;
		goto error5;
	}
	/* link the resulting key to the destination keyring if we can */
	if (dest_ref) {
		ret = key_permission(key_ref, KEY_NEED_LINK);
		if (ret < 0)
			goto error6;
		ret = key_link(key_ref_to_ptr(dest_ref), key_ref_to_ptr(key_ref));
		if (ret < 0)
			goto error6;
	}
	ret = key_ref_to_ptr(key_ref)->serial;
	/* unwind in reverse order of acquisition */
error6:
	key_ref_put(key_ref);
error5:
	key_type_put(ktype);
error4:
	key_ref_put(dest_ref);
error3:
	key_ref_put(keyring_ref);
error2:
	kfree(description);
error:
	return ret;
}
/*
 * Read a key's payload.
 *
 * The key must either grant the caller Read permission, or it must grant the
 * caller Search permission when searched for from the process keyrings.
 *
 * If successful, we place up to buflen bytes of data into the buffer, if one
 * is provided, and return the amount of data that is available in the key,
 * irrespective of how much we copied into the buffer.
 */
long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
{
	struct key *key;
	key_ref_t key_ref;
	long ret;
	/* find the key first */
	key_ref = lookup_user_key(keyid, 0, 0);
	if (IS_ERR(key_ref)) {
		ret = -ENOKEY;
		goto error;
	}
	key = key_ref_to_ptr(key_ref);
	/* Refuse to read a negatively instantiated key: it has no payload,
	 * and key type ->read() methods may dereference the (NULL) payload
	 * unconditionally (CVE-2017-12192: NULL pointer dereference via
	 * KEYCTL_READ on a negative key). */
	if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
		ret = -ENOKEY;
		goto error2;
	}
	/* see if we can read it directly */
	ret = key_permission(key_ref, KEY_NEED_READ);
	if (ret == 0)
		goto can_read_key;
	/* was "goto error", which skipped key_put() and leaked the key
	 * reference on any non-EACCES permission failure */
	if (ret != -EACCES)
		goto error2;
	/* we can't; see if it's searchable from this process's keyrings
	 * - we automatically take account of the fact that it may be
	 *   dangling off an instantiation key
	 */
	if (!is_key_possessed(key_ref)) {
		ret = -EACCES;
		goto error2;
	}
	/* the key is probably readable - now try to read it */
can_read_key:
	ret = -EOPNOTSUPP;
	if (key->type->read) {
		/* Read the data with the semaphore held (since we might sleep)
		 * to protect against the key being updated or revoked.
		 */
		down_read(&key->sem);
		ret = key_validate(key);
		if (ret == 0)
			ret = key->type->read(key, buffer, buflen);
		up_read(&key->sem);
	}
error2:
	key_put(key);
error:
	return ret;
}
/*
 * Change the ownership of a key
 *
 * The key must grant the caller Setattr permission for this to work, though
 * the key need not be fully instantiated yet. For the UID to be changed, or
 * for the GID to be changed to a group the caller is not a member of, the
 * caller must have sysadmin capability. If either uid or gid is -1 then that
 * attribute is not changed.
 *
 * If the UID is to be changed, the new user must have sufficient quota to
 * accept the key. The quota deduction will be removed from the old user to
 * the new user should the attribute be changed.
 *
 * If successful, 0 will be returned.
 */
long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
{
	struct key_user *newowner, *zapowner = NULL;
	struct key *key;
	key_ref_t key_ref;
	long ret;
	kuid_t uid;
	kgid_t gid;
	/* map the IDs into this task's user namespace and validate them */
	uid = make_kuid(current_user_ns(), user);
	gid = make_kgid(current_user_ns(), group);
	ret = -EINVAL;
	if ((user != (uid_t) -1) && !uid_valid(uid))
		goto error;
	if ((group != (gid_t) -1) && !gid_valid(gid))
		goto error;
	/* -1 for both means nothing to do; succeed trivially */
	ret = 0;
	if (user == (uid_t) -1 && group == (gid_t) -1)
		goto error;
	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
				  KEY_NEED_SETATTR);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error;
	}
	key = key_ref_to_ptr(key_ref);
	/* make the changes with the locks held to prevent chown/chown races */
	ret = -EACCES;
	down_write(&key->sem);
	if (!capable(CAP_SYS_ADMIN)) {
		/* only the sysadmin can chown a key to some other UID */
		if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
			goto error_put;
		/* only the sysadmin can set the key's GID to a group other
		 * than one of those that the current process subscribes to */
		if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
			goto error_put;
	}
	/* change the UID */
	if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
		ret = -ENOMEM;
		newowner = key_user_lookup(uid);
		if (!newowner)
			goto error_put;
		/* transfer the quota burden to the new user */
		if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
			unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
				key_quota_root_maxkeys : key_quota_maxkeys;
			unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
				key_quota_root_maxbytes : key_quota_maxbytes;
			/* the third clause guards against qnbytes overflow */
			spin_lock(&newowner->lock);
			if (newowner->qnkeys + 1 >= maxkeys ||
			    newowner->qnbytes + key->quotalen >= maxbytes ||
			    newowner->qnbytes + key->quotalen <
			    newowner->qnbytes)
				goto quota_overrun;
			newowner->qnkeys++;
			newowner->qnbytes += key->quotalen;
			spin_unlock(&newowner->lock);
			spin_lock(&key->user->lock);
			key->user->qnkeys--;
			key->user->qnbytes -= key->quotalen;
			spin_unlock(&key->user->lock);
		}
		/* move the per-user key counters across as well */
		atomic_dec(&key->user->nkeys);
		atomic_inc(&newowner->nkeys);
		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
			atomic_dec(&key->user->nikeys);
			atomic_inc(&newowner->nikeys);
		}
		/* drop the old owner's ref after releasing the key sem */
		zapowner = key->user;
		key->user = newowner;
		key->uid = uid;
	}
	/* change the GID */
	if (group != (gid_t) -1)
		key->gid = gid;
	ret = 0;
error_put:
	up_write(&key->sem);
	key_put(key);
	if (zapowner)
		key_user_put(zapowner);
error:
	return ret;
quota_overrun:
	spin_unlock(&newowner->lock);
	zapowner = newowner;
	ret = -EDQUOT;
	goto error_put;
}
/*
 * Change the permission mask on a key.
 *
 * The key must grant the caller Setattr permission for this to work, though
 * the key need not be fully instantiated yet. If the caller does not have
 * sysadmin capability, it may only change the permission on keys that it owns.
 */
long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
{
	struct key *key;
	key_ref_t key_ref;
	long ret;
	/* reject any bits outside the defined possessor/user/group/other set */
	ret = -EINVAL;
	if (perm & ~(KEY_POS_ALL | KEY_USR_ALL | KEY_GRP_ALL | KEY_OTH_ALL))
		goto error;
	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
				  KEY_NEED_SETATTR);
	if (IS_ERR(key_ref)) {
		ret = PTR_ERR(key_ref);
		goto error;
	}
	key = key_ref_to_ptr(key_ref);
	/* make the changes with the locks held to prevent chown/chmod races */
	ret = -EACCES;
	down_write(&key->sem);
	/* if we're not the sysadmin, we can only change a key that we own */
	if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
		key->perm = perm;
		ret = 0;
	}
	up_write(&key->sem);
	key_put(key);
error:
	return ret;
}
/*
 * Get the destination keyring for instantiation and check that the caller
 * has Write permission on it.
 *
 * On success *_dest_keyring holds a reference the caller must drop (or NULL
 * when no link was requested); the return value is 0 or a -ve error code.
 */
static long get_instantiation_keyring(key_serial_t ringid,
				      struct request_key_auth *rka,
				      struct key **_dest_keyring)
{
	key_ref_t keyring_ref;
	*_dest_keyring = NULL;
	/* a zero ring ID means no link was requested */
	if (ringid == 0)
		return 0;
	/* a positive ID nominates a specific keyring directly */
	if (ringid > 0) {
		keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE,
					      KEY_NEED_WRITE);
		if (IS_ERR(keyring_ref))
			return PTR_ERR(keyring_ref);
		*_dest_keyring = key_ref_to_ptr(keyring_ref);
		return 0;
	}
	/* the authorisation key itself is not a valid destination */
	if (ringid == KEY_SPEC_REQKEY_AUTH_KEY)
		return -EINVAL;
	/* any KEY_SPEC_*_KEYRING special ID means: use the destination
	 * keyring recorded in the authorisation key */
	if (ringid >= KEY_SPEC_REQUESTOR_KEYRING) {
		*_dest_keyring = key_get(rka->dest_keyring);
		return 0;
	}
	return -ENOKEY;
}
/*
 * Replace the request_key authorisation key recorded in the current task's
 * credentials with @key (which may be NULL to drop the assumed authority).
 */
static int keyctl_change_reqkey_auth(struct key *key)
{
	struct cred *cred = prepare_creds();
	if (!cred)
		return -ENOMEM;
	/* drop the old authkey ref and take one on the new */
	key_put(cred->request_key_auth);
	cred->request_key_auth = key_get(key);
	return commit_creds(cred);
}
/*
 * Instantiate a key with the specified payload and link the key into the
 * destination keyring if one is given.
 *
 * The caller must have the appropriate instantiation permit set for this to
 * work (see keyctl_assume_authority). No other permissions are required.
 *
 * @id:     serial number of the key being instantiated; must match the
 *          target recorded in the assumed authorisation key
 * @from:   iterator over the payload data, or NULL for no payload
 * @ringid: keyring to also link the key into, or 0 for none
 *
 * If successful, 0 will be returned.
 */
long keyctl_instantiate_key_common(key_serial_t id,
				   struct iov_iter *from,
				   key_serial_t ringid)
{
	const struct cred *cred = current_cred();
	struct request_key_auth *rka;
	struct key *instkey, *dest_keyring;
	size_t plen = from ? iov_iter_count(from) : 0;
	void *payload;
	long ret;

	kenter("%d,,%zu,%d", id, plen, ringid);

	/* a zero-length iterator is treated the same as no payload at all */
	if (!plen)
		from = NULL;

	/* cap payloads at just under 1MiB */
	ret = -EINVAL;
	if (plen > 1024 * 1024 - 1)
		goto error;

	/* the appropriate instantiation authorisation key must have been
	 * assumed before calling this */
	ret = -EPERM;
	instkey = cred->request_key_auth;
	if (!instkey)
		goto error;

	/* the authorisation key must cover the key named by the caller */
	rka = instkey->payload.data[0];
	if (rka->target_key->serial != id)
		goto error;

	/* pull the payload in if one was supplied */
	payload = NULL;
	if (from) {
		ret = -ENOMEM;
		payload = kvmalloc(plen, GFP_KERNEL);
		if (!payload)
			goto error;

		ret = -EFAULT;
		if (!copy_from_iter_full(payload, plen, from))
			goto error2;
	}

	/* find the destination keyring amongst those belonging to the
	 * requesting task */
	ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
	if (ret < 0)
		goto error2;

	/* instantiate the key and link it into a keyring */
	ret = key_instantiate_and_link(rka->target_key, payload, plen,
				       dest_keyring, instkey);

	/* drop the reference taken by get_instantiation_keyring() (NULL ok) */
	key_put(dest_keyring);

	/* discard the assumed authority if it's just been disabled by
	 * instantiation of the key */
	if (ret == 0)
		keyctl_change_reqkey_auth(NULL);

error2:
	kvfree(payload);
error:
	return ret;
}
/*
 * Instantiate a key with the specified userspace payload and link the key
 * into the destination keyring if one is given.
 *
 * The caller must have the appropriate instantiation permit set for this to
 * work (see keyctl_assume_authority). No other permissions are required.
 *
 * If successful, 0 will be returned.
 */
long keyctl_instantiate_key(key_serial_t id,
			    const void __user *_payload,
			    size_t plen,
			    key_serial_t ringid)
{
	struct iovec iov;
	struct iov_iter from;
	int ret;

	/* with no payload (or a zero length), hand the common helper a NULL
	 * iterator */
	if (!_payload || !plen)
		return keyctl_instantiate_key_common(id, NULL, ringid);

	ret = import_single_range(WRITE, (void __user *)_payload, plen,
				  &iov, &from);
	if (unlikely(ret))
		return ret;

	return keyctl_instantiate_key_common(id, &from, ringid);
}
/*
 * Instantiate a key with the specified multipart payload and link the key
 * into the destination keyring if one is given.
 *
 * The caller must have the appropriate instantiation permit set for this to
 * work (see keyctl_assume_authority). No other permissions are required.
 *
 * If successful, 0 will be returned.
 */
long keyctl_instantiate_key_iov(key_serial_t id,
				const struct iovec __user *_payload_iov,
				unsigned ioc,
				key_serial_t ringid)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter from;
	long ret;
	/* a NULL vector is treated as an empty one */
	unsigned count = _payload_iov ? ioc : 0;

	ret = import_iovec(WRITE, _payload_iov, count,
			   ARRAY_SIZE(iovstack), &iov, &from);
	if (ret < 0)
		return ret;

	ret = keyctl_instantiate_key_common(id, &from, ringid);

	/* import_iovec() may have allocated a larger vector than iovstack */
	kfree(iov);
	return ret;
}
/*
 * Negatively instantiate the key with the given timeout (in seconds) and link
 * the key into the destination keyring if one is given.
 *
 * The caller must have the appropriate instantiation permit set for this to
 * work (see keyctl_assume_authority). No other permissions are required.
 *
 * The key and any links to the key will be automatically garbage collected
 * after the timeout expires.
 *
 * Negative keys are used to rate limit repeated request_key() calls by causing
 * them to return -ENOKEY until the negative key expires.
 *
 * If successful, 0 will be returned.
 */
long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
{
	/* negation is just rejection with a fixed ENOKEY error code */
	return keyctl_reject_key(id, timeout, ENOKEY, ringid);
}
/*
 * Negatively instantiate the key with the given timeout (in seconds) and error
 * code and link the key into the destination keyring if one is given.
 *
 * The caller must have the appropriate instantiation permit set for this to
 * work (see keyctl_assume_authority). No other permissions are required.
 *
 * The key and any links to the key will be automatically garbage collected
 * after the timeout expires.
 *
 * Negative keys are used to rate limit repeated request_key() calls by causing
 * them to return the specified error code until the negative key expires.
 *
 * If successful, 0 will be returned.
 */
long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error,
		       key_serial_t ringid)
{
	const struct cred *cred = current_cred();
	struct request_key_auth *rka;
	struct key *instkey, *dest_keyring;
	long ret;

	kenter("%d,%u,%u,%d", id, timeout, error, ringid);

	/* must be a valid error code and mustn't be a kernel special */
	if (error <= 0 ||
	    error >= MAX_ERRNO ||
	    error == ERESTARTSYS ||
	    error == ERESTARTNOINTR ||
	    error == ERESTARTNOHAND ||
	    error == ERESTART_RESTARTBLOCK)
		return -EINVAL;

	/* the appropriate instantiation authorisation key must have been
	 * assumed before calling this */
	ret = -EPERM;
	instkey = cred->request_key_auth;
	if (!instkey)
		goto error;

	/* the authorisation key must cover the key named by the caller */
	rka = instkey->payload.data[0];
	if (rka->target_key->serial != id)
		goto error;

	/* find the destination keyring if present (which must also be
	 * writable) */
	ret = get_instantiation_keyring(ringid, rka, &dest_keyring);
	if (ret < 0)
		goto error;

	/* instantiate the key and link it into a keyring */
	ret = key_reject_and_link(rka->target_key, timeout, error,
				  dest_keyring, instkey);

	/* drop the reference taken by get_instantiation_keyring() (NULL ok) */
	key_put(dest_keyring);

	/* discard the assumed authority if it's just been disabled by
	 * instantiation of the key */
	if (ret == 0)
		keyctl_change_reqkey_auth(NULL);

error:
	return ret;
}
/*
 * Read or set the default keyring in which request_key() will cache keys and
 * return the old setting.
 *
 * If a thread or process keyring is specified then it will be created if it
 * doesn't yet exist. The old setting will be returned if successful.
 */
long keyctl_set_reqkey_keyring(int reqkey_defl)
{
	struct cred *new;
	int ret, old_setting;

	/* snapshot the current default before attempting any change */
	old_setting = current_cred_xxx(jit_keyring);

	if (reqkey_defl == KEY_REQKEY_DEFL_NO_CHANGE)
		return old_setting;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	switch (reqkey_defl) {
	case KEY_REQKEY_DEFL_THREAD_KEYRING:
		/* make sure the thread keyring exists before pointing at it */
		ret = install_thread_keyring_to_cred(new);
		if (ret < 0)
			goto error;
		goto set;

	case KEY_REQKEY_DEFL_PROCESS_KEYRING:
		/* likewise for the process keyring */
		ret = install_process_keyring_to_cred(new);
		if (ret < 0)
			goto error;
		goto set;

	case KEY_REQKEY_DEFL_DEFAULT:
	case KEY_REQKEY_DEFL_SESSION_KEYRING:
	case KEY_REQKEY_DEFL_USER_KEYRING:
	case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
	case KEY_REQKEY_DEFL_REQUESTOR_KEYRING:
		/* these targets need no pre-creation */
		goto set;

	case KEY_REQKEY_DEFL_NO_CHANGE:
	case KEY_REQKEY_DEFL_GROUP_KEYRING:
	default:
		ret = -EINVAL;
		goto error;
	}

set:
	new->jit_keyring = reqkey_defl;
	commit_creds(new);
	return old_setting;

error:
	abort_creds(new);
	return ret;
}
/*
 * Set or clear the timeout on a key.
 *
 * Either the key must grant the caller Setattr permission or else the caller
 * must hold an instantiation authorisation token for the key.
 *
 * The timeout is either 0 to clear the timeout, or a number of seconds from
 * the current time. The key and any links to the key will be automatically
 * garbage collected after the timeout expires.
 *
 * Keys with KEY_FLAG_KEEP set should not be timed out.
 *
 * If successful, 0 is returned.
 */
long keyctl_set_timeout(key_serial_t id, unsigned timeout)
{
	struct key *key, *instkey;
	key_ref_t key_ref;
	long ret;

	key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
				  KEY_NEED_SETATTR);
	if (IS_ERR(key_ref)) {
		/* setting the timeout on a key under construction is permitted
		 * if we have the authorisation token handy */
		if (PTR_ERR(key_ref) == -EACCES) {
			instkey = key_get_instantiation_authkey(id);
			if (!IS_ERR(instkey)) {
				/* holding the token is proof enough; drop it
				 * and redo the lookup without a permission
				 * check */
				key_put(instkey);
				key_ref = lookup_user_key(id,
							  KEY_LOOKUP_PARTIAL,
							  0);
				if (!IS_ERR(key_ref))
					goto okay;
			}
		}
		ret = PTR_ERR(key_ref);
		goto error;
	}

okay:
	key = key_ref_to_ptr(key_ref);
	ret = 0;
	if (test_bit(KEY_FLAG_KEEP, &key->flags))
		ret = -EPERM;	/* kept keys may not be timed out */
	else
		key_set_timeout(key, timeout);
	key_put(key);

error:
	return ret;
}
/*
 * Assume (or clear) the authority to instantiate the specified key.
 *
 * This sets the authoritative token currently in force for key instantiation.
 * This must be done for a key to be instantiated. It has the effect of making
 * available all the keys from the caller of the request_key() that created a
 * key to request_key() calls made by the caller of this function.
 *
 * The caller must have the instantiation key in their process keyrings with a
 * Search permission grant available to the caller.
 *
 * If the ID given is 0, then the setting will be cleared and 0 returned.
 *
 * If the ID given has a matching an authorisation key, then that key will be
 * set and its ID will be returned. The authorisation key can be read to get
 * the callout information passed to request_key().
 */
long keyctl_assume_authority(key_serial_t id)
{
	struct key *authkey;
	long ret;

	/* special key IDs aren't permitted */
	ret = -EINVAL;
	if (id < 0)
		goto error;

	/* we divest ourselves of authority if given an ID of 0 */
	if (id == 0) {
		ret = keyctl_change_reqkey_auth(NULL);
		goto error;
	}

	/* attempt to assume the authority temporarily granted to us whilst we
	 * instantiate the specified key
	 * - the authorisation key must be in the current task's keyrings
	 *   somewhere
	 */
	authkey = key_get_instantiation_authkey(id);
	if (IS_ERR(authkey)) {
		ret = PTR_ERR(authkey);
		goto error;
	}

	/* Fixed: the old code jumped to the error label when
	 * keyctl_change_reqkey_auth() failed, leaking the reference taken
	 * above, and read authkey->serial only after key_put().  Read the
	 * serial while we still hold our reference and drop it on every
	 * path. */
	ret = keyctl_change_reqkey_auth(authkey);
	if (ret == 0)
		ret = authkey->serial;
	key_put(authkey);
error:
	return ret;
}
/*
 * Get a key's the LSM security label.
 *
 * The key must grant the caller View permission for this to work.
 *
 * If there's a buffer, then up to buflen bytes of data will be placed into it.
 *
 * If successful, the amount of information available will be returned,
 * irrespective of how much was copied (including the terminal NUL).
 */
long keyctl_get_security(key_serial_t keyid,
			 char __user *buffer,
			 size_t buflen)
{
	struct key *key, *instkey;
	key_ref_t key_ref;
	char *context;
	long ret;

	key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
	if (IS_ERR(key_ref)) {
		if (PTR_ERR(key_ref) != -EACCES)
			return PTR_ERR(key_ref);

		/* viewing a key under construction is also permitted if we
		 * have the authorisation token handy */
		instkey = key_get_instantiation_authkey(keyid);
		if (IS_ERR(instkey))
			return PTR_ERR(instkey);
		key_put(instkey);

		/* redo the lookup without a permission check */
		key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, 0);
		if (IS_ERR(key_ref))
			return PTR_ERR(key_ref);
	}

	key = key_ref_to_ptr(key_ref);
	ret = security_key_getsecurity(key, &context);
	if (ret == 0) {
		/* if no information was returned, give userspace an empty
		 * string */
		ret = 1;
		if (buffer && buflen > 0 &&
		    copy_to_user(buffer, "", 1) != 0)
			ret = -EFAULT;
	} else if (ret > 0) {
		/* return as much data as there's room for */
		if (buffer && buflen > 0) {
			if (buflen > ret)
				buflen = ret;

			if (copy_to_user(buffer, context, buflen) != 0)
				ret = -EFAULT;
		}

		kfree(context);
	}

	key_ref_put(key_ref);
	return ret;
}
/*
 * Attempt to install the calling process's session keyring on the process's
 * parent process.
 *
 * The keyring must exist and must grant the caller LINK permission, and the
 * parent process must be single-threaded and must have the same effective
 * ownership as this process and mustn't be SUID/SGID.
 *
 * The keyring will be emplaced on the parent when it next resumes userspace.
 *
 * If successful, 0 will be returned.
 */
long keyctl_session_to_parent(void)
{
	struct task_struct *me, *parent;
	const struct cred *mycred, *pcred;
	struct callback_head *newwork, *oldwork;
	key_ref_t keyring_r;
	struct cred *cred;
	int ret;

	keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK);
	if (IS_ERR(keyring_r))
		return PTR_ERR(keyring_r);

	ret = -ENOMEM;

	/* our parent is going to need a new cred struct, a new tgcred struct
	 * and new security data, so we allocate them here to prevent ENOMEM in
	 * our parent */
	cred = cred_alloc_blank();
	if (!cred)
		goto error_keyring;
	newwork = &cred->rcu;

	/* ownership of the keyring reference moves into the blank cred */
	cred->session_keyring = key_ref_to_ptr(keyring_r);
	keyring_r = NULL;
	init_task_work(newwork, key_change_session_keyring);

	me = current;
	rcu_read_lock();
	write_lock_irq(&tasklist_lock);

	ret = -EPERM;
	oldwork = NULL;
	parent = me->real_parent;

	/* the parent mustn't be init and mustn't be a kernel thread */
	if (parent->pid <= 1 || !parent->mm)
		goto unlock;

	/* the parent must be single threaded */
	if (!thread_group_empty(parent))
		goto unlock;

	/* the parent and the child must have different session keyrings or
	 * there's no point */
	mycred = current_cred();
	pcred = __task_cred(parent);
	if (mycred == pcred ||
	    mycred->session_keyring == pcred->session_keyring) {
		ret = 0;
		goto unlock;
	}

	/* the parent must have the same effective ownership and mustn't be
	 * SUID/SGID */
	if (!uid_eq(pcred->uid, mycred->euid) ||
	    !uid_eq(pcred->euid, mycred->euid) ||
	    !uid_eq(pcred->suid, mycred->euid) ||
	    !gid_eq(pcred->gid, mycred->egid) ||
	    !gid_eq(pcred->egid, mycred->egid) ||
	    !gid_eq(pcred->sgid, mycred->egid))
		goto unlock;

	/* the keyrings must have the same UID */
	if ((pcred->session_keyring &&
	     !uid_eq(pcred->session_keyring->uid, mycred->euid)) ||
	    !uid_eq(mycred->session_keyring->uid, mycred->euid))
		goto unlock;

	/* cancel an already pending keyring replacement */
	oldwork = task_work_cancel(parent, key_change_session_keyring);

	/* the replacement session keyring is applied just prior to userspace
	 * restarting */
	ret = task_work_add(parent, newwork, true);
	if (!ret)
		newwork = NULL;	/* the parent now owns the work item */
unlock:
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	/* release the cred attached to a cancelled earlier replacement */
	if (oldwork)
		put_cred(container_of(oldwork, struct cred, rcu));
	/* release our cred if it was never handed to the parent */
	if (newwork)
		put_cred(cred);
	return ret;

error_keyring:
	key_ref_put(keyring_r);
	return ret;
}
/*
 * Apply a restriction to a given keyring.
 *
 * The caller must have Setattr permission to change keyring restrictions.
 *
 * The requested type name may be a NULL pointer to reject all attempts
 * to link to the keyring. If _type is non-NULL, _restriction can be
 * NULL or a pointer to a string describing the restriction. If _type is
 * NULL, _restriction must also be NULL.
 *
 * Returns 0 if successful.
 */
long keyctl_restrict_keyring(key_serial_t id, const char __user *_type,
			     const char __user *_restriction)
{
	key_ref_t key_ref;
	char type[32];
	char *restriction = NULL;
	long ret;

	key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
	if (IS_ERR(key_ref))
		return PTR_ERR(key_ref);

	if (_type) {
		ret = key_get_type_from_user(type, _type, sizeof(type));
		if (ret < 0)
			goto out;
	} else if (_restriction) {
		/* a restriction string without a type is meaningless */
		ret = -EINVAL;
		goto out;
	}

	if (_restriction) {
		restriction = strndup_user(_restriction, PAGE_SIZE);
		if (IS_ERR(restriction)) {
			ret = PTR_ERR(restriction);
			goto out;
		}
	}

	/* a NULL type means "reject all future link attempts" */
	ret = keyring_restrict(key_ref, _type ? type : NULL, restriction);
	kfree(restriction);

out:
	key_ref_put(key_ref);
	return ret;
}
/*
 * The key control system call
 *
 * Demultiplexes the keyctl() option code onto the individual operation
 * handlers above, casting the four generic arguments to each handler's
 * expected types.  Unknown options return -EOPNOTSUPP.
 */
SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	switch (option) {
	case KEYCTL_GET_KEYRING_ID:
		return keyctl_get_keyring_ID((key_serial_t) arg2,
					     (int) arg3);

	case KEYCTL_JOIN_SESSION_KEYRING:
		return keyctl_join_session_keyring((const char __user *) arg2);

	case KEYCTL_UPDATE:
		return keyctl_update_key((key_serial_t) arg2,
					 (const void __user *) arg3,
					 (size_t) arg4);

	case KEYCTL_REVOKE:
		return keyctl_revoke_key((key_serial_t) arg2);

	case KEYCTL_DESCRIBE:
		return keyctl_describe_key((key_serial_t) arg2,
					   (char __user *) arg3,
					   (unsigned) arg4);

	case KEYCTL_CLEAR:
		return keyctl_keyring_clear((key_serial_t) arg2);

	case KEYCTL_LINK:
		return keyctl_keyring_link((key_serial_t) arg2,
					   (key_serial_t) arg3);

	case KEYCTL_UNLINK:
		return keyctl_keyring_unlink((key_serial_t) arg2,
					     (key_serial_t) arg3);

	case KEYCTL_SEARCH:
		return keyctl_keyring_search((key_serial_t) arg2,
					     (const char __user *) arg3,
					     (const char __user *) arg4,
					     (key_serial_t) arg5);

	case KEYCTL_READ:
		return keyctl_read_key((key_serial_t) arg2,
				       (char __user *) arg3,
				       (size_t) arg4);

	case KEYCTL_CHOWN:
		return keyctl_chown_key((key_serial_t) arg2,
					(uid_t) arg3,
					(gid_t) arg4);

	case KEYCTL_SETPERM:
		return keyctl_setperm_key((key_serial_t) arg2,
					  (key_perm_t) arg3);

	case KEYCTL_INSTANTIATE:
		return keyctl_instantiate_key((key_serial_t) arg2,
					      (const void __user *) arg3,
					      (size_t) arg4,
					      (key_serial_t) arg5);

	case KEYCTL_NEGATE:
		return keyctl_negate_key((key_serial_t) arg2,
					 (unsigned) arg3,
					 (key_serial_t) arg4);

	case KEYCTL_SET_REQKEY_KEYRING:
		return keyctl_set_reqkey_keyring(arg2);

	case KEYCTL_SET_TIMEOUT:
		return keyctl_set_timeout((key_serial_t) arg2,
					  (unsigned) arg3);

	case KEYCTL_ASSUME_AUTHORITY:
		return keyctl_assume_authority((key_serial_t) arg2);

	case KEYCTL_GET_SECURITY:
		return keyctl_get_security((key_serial_t) arg2,
					   (char __user *) arg3,
					   (size_t) arg4);

	case KEYCTL_SESSION_TO_PARENT:
		return keyctl_session_to_parent();

	case KEYCTL_REJECT:
		return keyctl_reject_key((key_serial_t) arg2,
					 (unsigned) arg3,
					 (unsigned) arg4,
					 (key_serial_t) arg5);

	case KEYCTL_INSTANTIATE_IOV:
		return keyctl_instantiate_key_iov(
			(key_serial_t) arg2,
			(const struct iovec __user *) arg3,
			(unsigned) arg4,
			(key_serial_t) arg5);

	case KEYCTL_INVALIDATE:
		return keyctl_invalidate_key((key_serial_t) arg2);

	case KEYCTL_GET_PERSISTENT:
		return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);

	case KEYCTL_DH_COMPUTE:
		return keyctl_dh_compute((struct keyctl_dh_params __user *) arg2,
					 (char __user *) arg3, (size_t) arg4,
					 (struct keyctl_kdf_params __user *) arg5);

	case KEYCTL_RESTRICT_KEYRING:
		return keyctl_restrict_keyring((key_serial_t) arg2,
					       (const char __user *) arg3,
					       (const char __user *) arg4);

	default:
		return -EOPNOTSUPP;
	}
}
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "git2.h"
#include "git2/odb_backend.h"
#include "smart.h"
#include "refs.h"
#include "repository.h"
#include "push.h"
#include "pack-objects.h"
#include "remote.h"
#include "util.h"
#define NETWORK_XFER_THRESHOLD (100*1024)
/* The minimal interval between progress updates (in seconds). */
#define MIN_PROGRESS_UPDATE_INTERVAL 0.5
/*
 * Read advertised refs off the wire into t->refs until the requested number
 * of flush-pkts has been seen.  Returns the number of flushes consumed, or a
 * negative error code.
 */
int git_smart__store_refs(transport_smart *t, int flushes)
{
	gitno_buffer *buf = &t->buffer;
	git_vector *refs = &t->refs;
	int error, flush = 0, recvd;
	const char *line_end = NULL;
	git_pkt *pkt = NULL;
	size_t i;

	/* Clear existing refs in case git_remote_connect() is called again
	 * after git_remote_disconnect().
	 */
	git_vector_foreach(refs, i, pkt) {
		git_pkt_free(pkt);
	}
	git_vector_clear(refs);
	pkt = NULL;

	do {
		if (buf->offset > 0)
			error = git_pkt_parse_line(&pkt, buf->data, &line_end, buf->offset);
		else
			error = GIT_EBUFS;

		if (error < 0 && error != GIT_EBUFS)
			return error;

		if (error == GIT_EBUFS) {
			/* not enough buffered data for a full pkt-line; read more */
			if ((recvd = gitno_recv(buf)) < 0)
				return recvd;

			if (recvd == 0 && !flush) {
				giterr_set(GITERR_NET, "early EOF");
				return GIT_EEOF;
			}

			continue;
		}

		gitno_consume(buf, line_end);
		if (pkt->type == GIT_PKT_ERR) {
			giterr_set(GITERR_NET, "Remote error: %s", ((git_pkt_err *)pkt)->error);
			git__free(pkt);
			return -1;
		}

		/* Fixed: the packet was leaked when the vector insert failed */
		if (pkt->type != GIT_PKT_FLUSH && git_vector_insert(refs, pkt) < 0) {
			git_pkt_free(pkt);
			return -1;
		}

		if (pkt->type == GIT_PKT_FLUSH) {
			flush++;
			git_pkt_free(pkt);
		}
	} while (flush < flushes);

	return flush;
}
/*
 * Parse one "symref=<src>:<dst>" capability token, record it as a refspec in
 * @symrefs, and advance *out past the token.
 *
 * Returns 0 on success or a negative error code; on failure nothing is
 * appended and all temporaries are released.
 */
static int append_symref(const char **out, git_vector *symrefs, const char *ptr)
{
	int error;
	const char *end;
	git_buf buf = GIT_BUF_INIT;
	git_refspec *mapping = NULL;

	ptr += strlen(GIT_CAP_SYMREF);
	if (*ptr != '=')
		goto on_invalid;

	ptr++;
	/* the token runs to the next space or to the end of the string */
	if (!(end = strchr(ptr, ' ')) &&
	    !(end = strchr(ptr, '\0')))
		goto on_invalid;

	if ((error = git_buf_put(&buf, ptr, end - ptr)) < 0) {
		/* Fixed: release any partial buffer on failure */
		git_buf_free(&buf);
		return error;
	}

	/* symref mapping has refspec format */
	mapping = git__calloc(1, sizeof(git_refspec));
	GITERR_CHECK_ALLOC(mapping);

	error = git_refspec__parse(mapping, git_buf_cstr(&buf), true);
	git_buf_free(&buf);

	/* if the error isn't OOM, then it's a parse error; let's use a nicer message */
	if (error < 0) {
		if (giterr_last()->klass != GITERR_NOMEMORY)
			goto on_invalid;

		git__free(mapping);
		return error;
	}

	/* Fixed: the mapping was leaked when the vector insert failed */
	if ((error = git_vector_insert(symrefs, mapping)) < 0) {
		git_refspec__free(mapping);
		git__free(mapping);
		return error;
	}

	*out = end;
	return 0;

on_invalid:
	giterr_set(GITERR_NET, "remote sent invalid symref");
	git_refspec__free(mapping);
	git__free(mapping);
	return -1;
}
/*
 * Scan the space-separated capability list advertised with the first ref pkt
 * and record the capabilities we understand in @caps; "symref=" entries are
 * parsed into @symrefs.  Unknown capabilities are skipped.
 *
 * NOTE: the prefix checks are order-sensitive — a longer capability name must
 * be tested before any capability that is a prefix of it (multi_ack_detailed
 * before multi_ack, side-band-64k before side-band).
 */
int git_smart__detect_caps(git_pkt_ref *pkt, transport_smart_caps *caps, git_vector *symrefs)
{
	const char *ptr;

	/* No refs or capabilites, odd but not a problem */
	if (pkt == NULL || pkt->capabilities == NULL)
		return 0;

	ptr = pkt->capabilities;
	while (ptr != NULL && *ptr != '\0') {
		if (*ptr == ' ')
			ptr++;

		if (!git__prefixcmp(ptr, GIT_CAP_OFS_DELTA)) {
			caps->common = caps->ofs_delta = 1;
			ptr += strlen(GIT_CAP_OFS_DELTA);
			continue;
		}

		/* Keep multi_ack_detailed before multi_ack */
		if (!git__prefixcmp(ptr, GIT_CAP_MULTI_ACK_DETAILED)) {
			caps->common = caps->multi_ack_detailed = 1;
			ptr += strlen(GIT_CAP_MULTI_ACK_DETAILED);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_MULTI_ACK)) {
			caps->common = caps->multi_ack = 1;
			ptr += strlen(GIT_CAP_MULTI_ACK);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_INCLUDE_TAG)) {
			caps->common = caps->include_tag = 1;
			ptr += strlen(GIT_CAP_INCLUDE_TAG);
			continue;
		}

		/* Keep side-band check after side-band-64k */
		if (!git__prefixcmp(ptr, GIT_CAP_SIDE_BAND_64K)) {
			caps->common = caps->side_band_64k = 1;
			ptr += strlen(GIT_CAP_SIDE_BAND_64K);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_SIDE_BAND)) {
			caps->common = caps->side_band = 1;
			ptr += strlen(GIT_CAP_SIDE_BAND);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_DELETE_REFS)) {
			caps->common = caps->delete_refs = 1;
			ptr += strlen(GIT_CAP_DELETE_REFS);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_THIN_PACK)) {
			caps->common = caps->thin_pack = 1;
			ptr += strlen(GIT_CAP_THIN_PACK);
			continue;
		}

		if (!git__prefixcmp(ptr, GIT_CAP_SYMREF)) {
			int error;

			if ((error = append_symref(&ptr, symrefs, ptr)) < 0)
				return error;

			continue;
		}

		/* We don't know this capability, so skip it */
		ptr = strchr(ptr, ' ');
	}

	return 0;
}
/*
 * Read one complete pkt-line from the connection, receiving more data as
 * needed.
 *
 * On success returns the packet type (>= 0); the parsed packet is stored in
 * *out if @out is non-NULL, otherwise freed.  Returns a negative error code
 * on failure.
 */
static int recv_pkt(git_pkt **out, gitno_buffer *buf)
{
	const char *ptr = buf->data, *line_end = ptr;
	git_pkt *pkt = NULL;
	int pkt_type, error = 0, ret;

	do {
		if (buf->offset > 0)
			error = git_pkt_parse_line(&pkt, ptr, &line_end, buf->offset);
		else
			error = GIT_EBUFS;

		if (error == 0)
			break; /* return the pkt */

		if (error < 0 && error != GIT_EBUFS)
			return error;

		if ((ret = gitno_recv(buf)) < 0)
			return ret;

		/* Fixed: a zero-byte read means the remote hung up mid-packet;
		 * without this check the loop would spin forever on EOF. */
		if (ret == 0) {
			giterr_set(GITERR_NET, "early EOF");
			return GIT_EEOF;
		}
	} while (error);

	gitno_consume(buf, line_end);
	pkt_type = pkt->type;
	if (out != NULL)
		*out = pkt;
	else
		git__free(pkt);

	return pkt_type;
}
/*
 * Accumulate ACK packets from the server into t->common, stopping at (and
 * discarding) the first packet of any other type.
 */
static int store_common(transport_smart *t)
{
	gitno_buffer *buf = &t->buffer;
	git_pkt *pkt = NULL;
	int error;

	for (;;) {
		if ((error = recv_pkt(&pkt, buf)) < 0)
			return error;

		/* the first non-ACK packet terminates the run */
		if (pkt->type != GIT_PKT_ACK) {
			git__free(pkt);
			return 0;
		}

		if (git_vector_insert(&t->common, pkt) < 0)
			return -1;
	}
}
/*
 * Build a time-sorted revwalk seeded with every local non-tag, non-symbolic
 * reference, for generating "have" lines during negotiation.
 *
 * On success stores the walk in *out (caller frees); on failure all
 * temporaries are released.
 */
static int fetch_setup_walk(git_revwalk **out, git_repository *repo)
{
	git_revwalk *walk = NULL;
	git_strarray refs;
	unsigned int i;
	git_reference *ref = NULL; /* fixed: was uninitialized, freed on error */
	int error;

	if ((error = git_reference_list(&refs, repo)) < 0)
		return error;

	/* fixed: refs was leaked when the revwalk allocation failed */
	if ((error = git_revwalk_new(&walk, repo)) < 0)
		goto on_error;

	git_revwalk_sorting(walk, GIT_SORT_TIME);

	for (i = 0; i < refs.count; ++i) {
		/* No tags */
		if (!git__prefixcmp(refs.strings[i], GIT_REFS_TAGS_DIR))
			continue;

		if ((error = git_reference_lookup(&ref, repo, refs.strings[i])) < 0)
			goto on_error;

		/* fixed: symbolic refs were skipped without being freed */
		if (git_reference_type(ref) == GIT_REF_SYMBOLIC) {
			git_reference_free(ref);
			ref = NULL;
			continue;
		}

		if ((error = git_revwalk_push(walk, git_reference_target(ref))) < 0)
			goto on_error;

		/* free each reference as we go and clear the pointer so the
		 * error path never frees it a second time */
		git_reference_free(ref);
		ref = NULL;
	}

	git_strarray_free(&refs);
	*out = walk;
	return 0;

on_error:
	git_revwalk_free(walk);
	git_reference_free(ref);
	git_strarray_free(&refs);
	return error;
}
/*
 * Consume "continue"/"common" ACK packets until the server sends either a
 * NAK or a final ACK, then return 0 (or a negative receive error).
 */
static int wait_while_ack(gitno_buffer *buf)
{
	git_pkt_ack *ack = NULL;
	int error;

	for (;;) {
		/* drop the packet from the previous iteration */
		git__free(ack);
		ack = NULL;

		if ((error = recv_pkt((git_pkt **)&ack, buf)) < 0)
			return error;

		if (ack->type == GIT_PKT_NAK)
			break;

		if (ack->type != GIT_PKT_ACK)
			continue;

		/* a final ACK (neither continue nor common) ends the wait */
		if (ack->status != GIT_ACK_CONTINUE &&
		    ack->status != GIT_ACK_COMMON)
			break;
	}

	git__free(ack);
	return 0;
}
/*
 * Run the have/want negotiation with the remote: send the "want" list, then
 * batches of up to 256 "have" lines (flushed every 20), watching for ACKs
 * that establish common objects, and finally send "done".
 *
 * Returns 0 on success, GIT_EUSER if cancelled, or another negative error.
 */
int git_smart__negotiate_fetch(git_transport *transport, git_repository *repo, const git_remote_head * const *wants, size_t count)
{
	transport_smart *t = (transport_smart *)transport;
	gitno_buffer *buf = &t->buffer;
	git_buf data = GIT_BUF_INIT;
	git_revwalk *walk = NULL;
	int error = -1, pkt_type;
	unsigned int i;
	git_oid oid;

	if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
		return error;

	if ((error = fetch_setup_walk(&walk, repo)) < 0)
		goto on_error;

	/*
	 * Our support for ACK extensions is simply to parse them. On
	 * the first ACK we will accept that as enough common
	 * objects. We give up if we haven't found an answer in the
	 * first 256 we send.
	 */
	i = 0;
	while (i < 256) {
		error = git_revwalk_next(&oid, walk);

		if (error < 0) {
			/* GIT_ITEROVER just means we ran out of local commits */
			if (GIT_ITEROVER == error)
				break;

			goto on_error;
		}

		git_pkt_buffer_have(&oid, &data);
		i++;

		/* flush a batch of 20 "have" lines and look for ACKs */
		if (i % 20 == 0) {
			if (t->cancelled.val) {
				giterr_set(GITERR_NET, "The fetch was cancelled by the user");
				error = GIT_EUSER;
				goto on_error;
			}

			git_pkt_buffer_flush(&data);
			if (git_buf_oom(&data)) {
				error = -1;
				goto on_error;
			}

			if ((error = git_smart__negotiation_step(&t->parent, data.ptr, data.size)) < 0)
				goto on_error;

			git_buf_clear(&data);

			if (t->caps.multi_ack || t->caps.multi_ack_detailed) {
				if ((error = store_common(t)) < 0)
					goto on_error;
			} else {
				pkt_type = recv_pkt(NULL, buf);

				if (pkt_type == GIT_PKT_ACK) {
					break;
				} else if (pkt_type == GIT_PKT_NAK) {
					continue;
				} else if (pkt_type < 0) {
					/* recv_pkt returned an error */
					error = pkt_type;
					goto on_error;
				} else {
					giterr_set(GITERR_NET, "Unexpected pkt type");
					error = -1;
					goto on_error;
				}
			}
		}

		if (t->common.length > 0)
			break;

		/* in RPC (stateless) mode every request must restate the wants
		 * and the commonality found so far */
		if (i % 20 == 0 && t->rpc) {
			git_pkt_ack *pkt;
			unsigned int i;

			if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
				goto on_error;

			git_vector_foreach(&t->common, i, pkt) {
				if ((error = git_pkt_buffer_have(&pkt->oid, &data)) < 0)
					goto on_error;
			}

			if (git_buf_oom(&data)) {
				error = -1;
				goto on_error;
			}
		}
	}

	/* Tell the other end that we're done negotiating */
	if (t->rpc && t->common.length > 0) {
		git_pkt_ack *pkt;
		unsigned int i;

		if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
			goto on_error;

		git_vector_foreach(&t->common, i, pkt) {
			if ((error = git_pkt_buffer_have(&pkt->oid, &data)) < 0)
				goto on_error;
		}

		if (git_buf_oom(&data)) {
			error = -1;
			goto on_error;
		}
	}

	if ((error = git_pkt_buffer_done(&data)) < 0)
		goto on_error;

	if (t->cancelled.val) {
		giterr_set(GITERR_NET, "The fetch was cancelled by the user");
		error = GIT_EUSER;
		goto on_error;
	}
	if ((error = git_smart__negotiation_step(&t->parent, data.ptr, data.size)) < 0)
		goto on_error;

	git_buf_free(&data);
	git_revwalk_free(walk);

	/* Now let's eat up whatever the server gives us */
	if (!t->caps.multi_ack && !t->caps.multi_ack_detailed) {
		pkt_type = recv_pkt(NULL, buf);

		if (pkt_type < 0) {
			return pkt_type;
		} else if (pkt_type != GIT_PKT_ACK && pkt_type != GIT_PKT_NAK) {
			giterr_set(GITERR_NET, "Unexpected pkt type");
			return -1;
		}
	} else {
		error = wait_while_ack(buf);
	}

	return error;

on_error:
	git_revwalk_free(walk);
	git_buf_free(&data);
	return error;
}
/*
 * Download path for servers without side-band support: the stream is the raw
 * packfile, so feed the receive buffer straight into the pack writer until
 * the connection closes, then commit the pack.
 */
static int no_sideband(transport_smart *t, struct git_odb_writepack *writepack, gitno_buffer *buf, git_transfer_progress *stats)
{
	int recvd;

	do {
		if (t->cancelled.val) {
			giterr_set(GITERR_NET, "The fetch was cancelled by the user");
			return GIT_EUSER;
		}

		if (writepack->append(writepack, buf->data, buf->offset, stats) < 0)
			return -1;

		gitno_consume_n(buf, buf->offset);

		if ((recvd = gitno_recv(buf)) < 0)
			return recvd;
	} while (recvd > 0);

	/* a zero-byte read marks end of stream: finalize the pack */
	return writepack->commit(writepack, stats) < 0 ? -1 : 0;
}
/*
 * State threaded through network_packetsize() so raw byte counts can be
 * accumulated into the transfer stats and reported to the user's progress
 * callback at most once per NETWORK_XFER_THRESHOLD bytes.
 */
struct network_packetsize_payload
{
	git_transfer_progress_cb callback;	/* user progress callback */
	void *payload;				/* opaque argument for the callback */
	git_transfer_progress *stats;		/* stats block being accumulated */
	size_t last_fired_bytes;		/* received_bytes at the last notification */
};
/*
 * Per-read network callback: add @received to the running byte count and
 * notify the user's progress callback once enough new data has accumulated.
 * Returns GIT_EUSER if the user callback asks to stop, 0 otherwise.
 */
static int network_packetsize(size_t received, void *payload)
{
	struct network_packetsize_payload *npp = payload;
	size_t accumulated;

	/* Accumulate bytes */
	npp->stats->received_bytes += received;

	/* only notify once the threshold has been crossed */
	accumulated = npp->stats->received_bytes - npp->last_fired_bytes;
	if (accumulated <= NETWORK_XFER_THRESHOLD)
		return 0;

	npp->last_fired_bytes = npp->stats->received_bytes;
	return npp->callback(npp->stats, npp->payload) ? GIT_EUSER : 0;
}
/*
 * Receive the packfile from the remote and feed it into the object database,
 * demultiplexing side-band progress/data packets when the remote supports
 * them and reporting transfer progress through the optional callback.
 *
 * Returns 0 on success, GIT_EUSER if cancelled, or a negative error code.
 */
int git_smart__download_pack(
	git_transport *transport,
	git_repository *repo,
	git_transfer_progress *stats,
	git_transfer_progress_cb transfer_progress_cb,
	void *progress_payload)
{
	transport_smart *t = (transport_smart *)transport;
	gitno_buffer *buf = &t->buffer;
	git_odb *odb;
	struct git_odb_writepack *writepack = NULL;
	int error = 0;
	struct network_packetsize_payload npp = {0};

	memset(stats, 0, sizeof(git_transfer_progress));

	if (transfer_progress_cb) {
		/* hook the raw byte counter into the transport */
		npp.callback = transfer_progress_cb;
		npp.payload = progress_payload;
		npp.stats = stats;
		t->packetsize_cb = &network_packetsize;
		t->packetsize_payload = &npp;

		/* We might have something in the buffer already from negotiate_fetch */
		if (t->buffer.offset > 0 && !t->cancelled.val)
			if (t->packetsize_cb(t->buffer.offset, t->packetsize_payload))
				git_atomic_set(&t->cancelled, 1);
	}

	if ((error = git_repository_odb__weakptr(&odb, repo)) < 0 ||
		((error = git_odb_write_pack(&writepack, odb, transfer_progress_cb, progress_payload)) != 0))
		goto done;

	/*
	 * If the remote doesn't support the side-band, we can feed
	 * the data directly to the pack writer. Otherwise, we need to
	 * check which one belongs there.
	 */
	if (!t->caps.side_band && !t->caps.side_band_64k) {
		error = no_sideband(t, writepack, buf, stats);
		goto done;
	}

	do {
		git_pkt *pkt = NULL;

		/* Check cancellation before network call */
		if (t->cancelled.val) {
			giterr_clear();
			error = GIT_EUSER;
			goto done;
		}

		if ((error = recv_pkt(&pkt, buf)) >= 0) {
			/* Check cancellation after network call */
			if (t->cancelled.val) {
				giterr_clear();
				error = GIT_EUSER;
			} else if (pkt->type == GIT_PKT_PROGRESS) {
				/* side-band 2: textual progress for the user */
				if (t->progress_cb) {
					git_pkt_progress *p = (git_pkt_progress *) pkt;
					error = t->progress_cb(p->data, p->len, t->message_cb_payload);
				}
			} else if (pkt->type == GIT_PKT_DATA) {
				/* side-band 1: actual packfile bytes */
				git_pkt_data *p = (git_pkt_data *) pkt;

				if (p->len)
					error = writepack->append(writepack, p->data, p->len, stats);
			} else if (pkt->type == GIT_PKT_FLUSH) {
				/* A flush indicates the end of the packfile */
				git__free(pkt);
				break;
			}
		}

		git__free(pkt);
		if (error < 0)
			goto done;

	} while (1);

	/*
	 * Trailing execution of transfer_progress_cb, if necessary...
	 * Only the callback through the npp datastructure currently
	 * updates the last_fired_bytes value. It is possible that
	 * progress has already been reported with the correct
	 * "received_bytes" value, but until (if?) this is unified
	 * then we will report progress again to be sure that the
	 * correct last received_bytes value is reported.
	 */
	if (npp.callback && npp.stats->received_bytes > npp.last_fired_bytes) {
		error = npp.callback(npp.stats, npp.payload);
		if (error != 0)
			goto done;
	}

	error = writepack->commit(writepack, stats);

done:
	if (writepack)
		writepack->free(writepack);
	if (transfer_progress_cb) {
		/* unhook the byte counter: npp goes out of scope with us */
		t->packetsize_cb = NULL;
		t->packetsize_payload = NULL;
	}

	return error;
}
/*
 * Serialise the push commands into @buf as pkt-lines: one line per refspec of
 * the form "<old-oid> <new-oid> <dst-ref>\n", with the capability list
 * appended after a NUL on the first line only, terminated by a flush-pkt
 * ("0000").
 */
static int gen_pktline(git_buf *buf, git_push *push)
{
	push_spec *spec;
	size_t i, len;
	char old_id[GIT_OID_HEXSZ+1], new_id[GIT_OID_HEXSZ+1];

	old_id[GIT_OID_HEXSZ] = '\0'; new_id[GIT_OID_HEXSZ] = '\0';

	git_vector_foreach(&push->specs, i, spec) {
		/* 7 = 4-digit length prefix + two separating spaces + '\n' */
		len = 2*GIT_OID_HEXSZ + 7 + strlen(spec->refspec.dst);

		if (i == 0) {
			/* the first line also carries NUL + capabilities */
			++len; /* '\0' */
			if (push->report_status)
				len += strlen(GIT_CAP_REPORT_STATUS) + 1;
			len += strlen(GIT_CAP_SIDE_BAND_64K) + 1;
		}

		git_oid_fmt(old_id, &spec->roid);
		git_oid_fmt(new_id, &spec->loid);

		git_buf_printf(buf, "%04"PRIxZ"%s %s %s", len, old_id, new_id, spec->refspec.dst);

		if (i == 0) {
			git_buf_putc(buf, '\0');

			/* Core git always starts their capabilities string with a space */
			if (push->report_status) {
				git_buf_putc(buf, ' ');
				git_buf_printf(buf, GIT_CAP_REPORT_STATUS);
			}
			git_buf_putc(buf, ' ');
			git_buf_printf(buf, GIT_CAP_SIDE_BAND_64K);
		}

		git_buf_putc(buf, '\n');
	}

	git_buf_puts(buf, "0000");
	return git_buf_oom(buf) ? -1 : 0;
}
/*
 * Record one parsed report-status packet into push->status.
 *
 * Returns 0 on success, GIT_ITEROVER when a flush-pkt (end of report)
 * is seen, -1 on allocation failure or protocol error.
 */
static int add_push_report_pkt(git_push *push, git_pkt *pkt)
{
	push_status *report;

	/* flush-pkt ends the report */
	if (pkt->type == GIT_PKT_FLUSH)
		return GIT_ITEROVER;

	if (pkt->type == GIT_PKT_UNPACK) {
		push->unpack_ok = ((git_pkt_unpack *)pkt)->unpack_ok;
		return 0;
	}

	if (pkt->type == GIT_PKT_OK) {
		/* "ok <ref>": ref updated, no error message */
		report = git__calloc(1, sizeof(push_status));
		GITERR_CHECK_ALLOC(report);
		report->msg = NULL;
		report->ref = git__strdup(((git_pkt_ok *)pkt)->ref);
		if (!report->ref ||
			git_vector_insert(&push->status, report) < 0) {
			git_push_status_free(report);
			return -1;
		}
		return 0;
	}

	if (pkt->type == GIT_PKT_NG) {
		/* "ng <ref> <msg>": ref update rejected with a reason */
		report = git__calloc(1, sizeof(push_status));
		GITERR_CHECK_ALLOC(report);
		report->ref = git__strdup(((git_pkt_ng *)pkt)->ref);
		report->msg = git__strdup(((git_pkt_ng *)pkt)->msg);
		if (!report->ref || !report->msg ||
			git_vector_insert(&push->status, report) < 0) {
			git_push_status_free(report);
			return -1;
		}
		return 0;
	}

	/* anything else is not valid inside a report-status stream */
	giterr_set(GITERR_NET, "report-status: protocol error");
	return -1;
}
/*
 * Parse report-status packets that arrive wrapped in a side-band DATA
 * packet.  An inner pkt-line may be split across several side-band
 * packets; the incomplete tail is buffered in data_pkt_buf until the
 * next DATA packet arrives and the whole buffer is re-parsed.
 *
 * Returns 0 on success (including "need more data"), a negative value
 * on parse or protocol error.
 *
 * Fix: line_end is initialized to NULL.  Previously, if
 * git_pkt_parse_line() failed on the very first inner packet while
 * reading_from_buf was set, the "done:" path called git_buf_consume()
 * with an uninitialized pointer (undefined behavior).
 */
static int add_push_report_sideband_pkt(git_push *push, git_pkt_data *data_pkt, git_buf *data_pkt_buf)
{
	git_pkt *pkt;
	const char *line, *line_end = NULL;
	size_t line_len;
	int error;
	int reading_from_buf = data_pkt_buf->size > 0;

	if (reading_from_buf) {
		/* We had an existing partial packet, so add the new
		 * packet to the buffer and parse the whole thing */
		git_buf_put(data_pkt_buf, data_pkt->data, data_pkt->len);
		line = data_pkt_buf->ptr;
		line_len = data_pkt_buf->size;
	}
	else {
		line = data_pkt->data;
		line_len = data_pkt->len;
	}

	while (line_len > 0) {
		error = git_pkt_parse_line(&pkt, line, &line_end, line_len);

		if (error == GIT_EBUFS) {
			/* Buffer the data when the inner packet is split
			 * across multiple sideband packets */
			if (!reading_from_buf)
				git_buf_put(data_pkt_buf, line, line_len);
			error = 0;
			goto done;
		}
		else if (error < 0)
			goto done;

		/* Advance past the pkt-line that was just parsed */
		line_len -= (line_end - line);
		line = line_end;

		error = add_push_report_pkt(push, pkt);

		git_pkt_free(pkt);

		if (error < 0 && error != GIT_ITEROVER)
			goto done;
	}

	error = 0;

done:
	/* drop the fully-parsed prefix from the carry-over buffer;
	 * line_end may still be NULL if nothing was parsed */
	if (reading_from_buf && line_end)
		git_buf_consume(data_pkt_buf, line_end);
	return error;
}
/*
 * Read and parse the report-status response after a push.  Loops
 * reading pkt-lines from the network buffer until a flush-pkt is seen,
 * dispatching each packet (possibly wrapped in side-band DATA packets)
 * into push->status via the add_push_report_* helpers.
 *
 * Returns 0 on success, GIT_EEOF on early EOF, other negative values
 * on network or protocol errors.
 */
static int parse_report(transport_smart *transport, git_push *push)
{
	git_pkt *pkt = NULL;
	const char *line_end = NULL;
	gitno_buffer *buf = &transport->buffer;
	int error, recvd;
	git_buf data_pkt_buf = GIT_BUF_INIT;

	for (;;) {
		if (buf->offset > 0)
			error = git_pkt_parse_line(&pkt, buf->data,
						   &line_end, buf->offset);
		else
			error = GIT_EBUFS;

		if (error < 0 && error != GIT_EBUFS) {
			error = -1;
			goto done;
		}

		/* not enough buffered data for a full pkt-line: read more */
		if (error == GIT_EBUFS) {
			if ((recvd = gitno_recv(buf)) < 0) {
				error = recvd;
				goto done;
			}

			if (recvd == 0) {
				giterr_set(GITERR_NET, "early EOF");
				error = GIT_EEOF;
				goto done;
			}
			continue;
		}

		/* a full pkt-line was parsed: drop it from the buffer */
		gitno_consume(buf, line_end);

		error = 0;

		switch (pkt->type) {
			case GIT_PKT_DATA:
				/* This is a sideband packet which contains other packets */
				error = add_push_report_sideband_pkt(push, (git_pkt_data *)pkt, &data_pkt_buf);
				break;
			case GIT_PKT_ERR:
				giterr_set(GITERR_NET, "report-status: Error reported: %s",
					((git_pkt_err *)pkt)->error);
				error = -1;
				break;
			case GIT_PKT_PROGRESS:
				if (transport->progress_cb) {
					git_pkt_progress *p = (git_pkt_progress *) pkt;
					error = transport->progress_cb(p->data, p->len, transport->message_cb_payload);
				}
				break;
			default:
				error = add_push_report_pkt(push, pkt);
				break;
		}

		git_pkt_free(pkt);

		/* add_push_report_pkt returns GIT_ITEROVER when it receives a flush */
		if (error == GIT_ITEROVER) {
			error = 0;
			if (data_pkt_buf.size > 0) {
				/* If there was data remaining in the pack data buffer,
				 * then the server sent a partial pkt-line */
				giterr_set(GITERR_NET, "Incomplete pack data pkt-line");
				error = GIT_ERROR;
			}
			goto done;
		}

		if (error < 0) {
			goto done;
		}
	}
done:
	git_buf_free(&data_pkt_buf);
	return error;
}
/*
 * Append a new git_pkt_ref to "refs", built from a push spec
 * (destination refname + local OID).  Returns 0 on success, -1 on
 * allocation/insert failure (the partially built ref is freed).
 */
static int add_ref_from_push_spec(git_vector *refs, push_spec *push_spec)
{
	git_pkt_ref *new_ref = git__calloc(1, sizeof(git_pkt_ref));
	GITERR_CHECK_ALLOC(new_ref);

	new_ref->type = GIT_PKT_REF;
	git_oid_cpy(&new_ref->head.oid, &push_spec->loid);
	new_ref->head.name = git__strdup(push_spec->refspec.dst);

	if (new_ref->head.name && git_vector_insert(refs, new_ref) == 0)
		return 0;

	git_pkt_free((git_pkt *)new_ref);
	return -1;
}
/*
 * After a push, reconcile the locally cached remote refs ("refs") with
 * the push specs that were sent and the per-ref status the server
 * reported.  Successful updates are applied in place; specs with no
 * matching cached ref are added; refs updated to the zero OID
 * (deletions) are removed at the end.
 *
 * Returns 0 on success, -1 on protocol mismatch or allocation failure.
 */
static int update_refs_from_report(
	git_vector *refs,
	git_vector *push_specs,
	git_vector *push_report)
{
	git_pkt_ref *ref;
	push_spec *push_spec;
	push_status *push_status;
	size_t i, j, refs_len;
	int cmp;

	/* For each push spec we sent to the server, we should have
	 * gotten back a status packet in the push report */
	if (push_specs->length != push_report->length) {
		giterr_set(GITERR_NET, "report-status: protocol error");
		return -1;
	}

	/* We require that push_specs be sorted with push_spec_rref_cmp,
	 * and that push_report be sorted with push_status_ref_cmp */
	git_vector_sort(push_specs);
	git_vector_sort(push_report);

	git_vector_foreach(push_specs, i, push_spec) {
		push_status = git_vector_get(push_report, i);

		/* For each push spec we sent to the server, we should have
		 * gotten back a status packet in the push report which matches */
		if (strcmp(push_spec->refspec.dst, push_status->ref)) {
			giterr_set(GITERR_NET, "report-status: protocol error");
			return -1;
		}
	}

	/* We require that refs be sorted with ref_name_cmp */
	git_vector_sort(refs);
	i = j = 0;
	refs_len = refs->length;

	/* Merge join push_specs with refs (both sorted by refname) */
	while (i < push_specs->length && j < refs_len) {
		push_spec = git_vector_get(push_specs, i);
		push_status = git_vector_get(push_report, i);
		ref = git_vector_get(refs, j);

		cmp = strcmp(push_spec->refspec.dst, ref->head.name);

		/* Iterate appropriately */
		if (cmp <= 0) i++;
		if (cmp >= 0) j++;

		/* Add case: spec has no cached ref; only on server success */
		if (cmp < 0 &&
			!push_status->msg &&
			add_ref_from_push_spec(refs, push_spec) < 0)
			return -1;

		/* Update case, delete case (zero OID marks for removal below) */
		if (cmp == 0 &&
			!push_status->msg)
			git_oid_cpy(&ref->head.oid, &push_spec->loid);
	}

	/* remaining specs past the end of the cached list are all additions */
	for (; i < push_specs->length; i++) {
		push_spec = git_vector_get(push_specs, i);
		push_status = git_vector_get(push_report, i);

		/* Add case */
		if (!push_status->msg &&
			add_ref_from_push_spec(refs, push_spec) < 0)
			return -1;
	}

	/* Remove any refs which we updated to have a zero OID. */
	git_vector_rforeach(refs, i, ref) {
		if (git_oid_iszero(&ref->head.oid)) {
			git_vector_remove(refs, i);
			git_pkt_free((git_pkt *)ref);
		}
	}

	git_vector_sort(refs);

	return 0;
}
/* Context threaded through stream_thunk() while streaming a packfile. */
struct push_packbuilder_payload
{
	git_smart_subtransport_stream *stream; /* network stream the pack is written to */
	git_packbuilder *pb;                   /* packbuilder producing the pack data */
	git_push_transfer_progress cb;         /* optional user progress callback */
	void *cb_payload;                      /* opaque payload passed to cb */
	size_t last_bytes;                     /* total bytes written so far */
	double last_progress_report_time;      /* timestamp of last progress report */
};
static int stream_thunk(void *buf, size_t size, void *data)
{
int error = 0;
struct push_packbuilder_payload *payload = data;
if ((error = payload->stream->write(payload->stream, (const char *)buf, size)) < 0)
return error;
if (payload->cb) {
double current_time = git__timer();
payload->last_bytes += size;
if ((current_time - payload->last_progress_report_time) >= MIN_PROGRESS_UPDATE_INTERVAL) {
payload->last_progress_report_time = current_time;
error = payload->cb(payload->pb->nr_written, payload->pb->nr_objects, payload->last_bytes, payload->cb_payload);
}
}
return error;
}
/*
 * Perform a push over the smart protocol: send the pkt-line command
 * list, stream the packfile (unless only deletions are requested),
 * parse the server's report-status, and update the locally cached
 * remote refs accordingly.
 */
int git_smart__push(git_transport *transport, git_push *push, const git_remote_callbacks *cbs)
{
	transport_smart *t = (transport_smart *)transport;
	struct push_packbuilder_payload packbuilder_payload = {0};
	git_buf pktline = GIT_BUF_INIT;
	int error = 0, need_pack = 0;
	push_spec *spec;
	unsigned int i;

	packbuilder_payload.pb = push->pb;

	if (cbs && cbs->push_transfer_progress) {
		packbuilder_payload.cb = cbs->push_transfer_progress;
		packbuilder_payload.cb_payload = cbs->payload;
	}

#ifdef PUSH_DEBUG
{
	git_remote_head *head;
	char hex[GIT_OID_HEXSZ+1]; hex[GIT_OID_HEXSZ] = '\0';

	git_vector_foreach(&push->remote->refs, i, head) {
		git_oid_fmt(hex, &head->oid);
		fprintf(stderr, "%s (%s)\n", hex, head->name);
	}

	git_vector_foreach(&push->specs, i, spec) {
		git_oid_fmt(hex, &spec->roid);
		fprintf(stderr, "%s (%s) -> ", hex, spec->lref);
		git_oid_fmt(hex, &spec->loid);
		fprintf(stderr, "%s (%s)\n", hex, spec->rref ?
			spec->rref : spec->lref);
	}
}
#endif

	/*
	 * Figure out if we need to send a packfile; which is in all
	 * cases except when we only send delete commands
	 */
	git_vector_foreach(&push->specs, i, spec) {
		if (spec->refspec.src && spec->refspec.src[0] != '\0') {
			need_pack = 1;
			break;
		}
	}

	/* open the push stream, then send the command list */
	if ((error = git_smart__get_push_stream(t, &packbuilder_payload.stream)) < 0 ||
		(error = gen_pktline(&pktline, push)) < 0 ||
		(error = packbuilder_payload.stream->write(packbuilder_payload.stream, git_buf_cstr(&pktline), git_buf_len(&pktline))) < 0)
		goto done;

	if (need_pack &&
		(error = git_packbuilder_foreach(push->pb, &stream_thunk, &packbuilder_payload)) < 0)
		goto done;

	/* If we sent nothing or the server doesn't support report-status, then
	 * we consider the pack to have been unpacked successfully */
	if (!push->specs.length || !push->report_status)
		push->unpack_ok = 1;
	else if ((error = parse_report(t, push)) < 0)
		goto done;

	/* If progress is being reported write the final report */
	if (cbs && cbs->push_transfer_progress) {
		error = cbs->push_transfer_progress(
			push->pb->nr_written,
			push->pb->nr_objects,
			packbuilder_payload.last_bytes,
			cbs->payload);

		if (error < 0)
			goto done;
	}

	/* apply the per-ref results to the cached remote refs */
	if (push->status.length) {
		error = update_refs_from_report(&t->refs, &push->specs, &push->status);
		if (error < 0)
			goto done;

		error = git_smart__update_heads(t, NULL);
	}

done:
	git_buf_free(&pktline);
	return error;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_4809_1 |
crossvul-cpp_data_bad_5488_1 | 404: Not Found | ./CrossVul/dataset_final_sorted/CWE-476/c/bad_5488_1 |
crossvul-cpp_data_bad_4696_1 | /*
* irc-protocol.c - implementation of IRC protocol (RFCs 1459/2810/2811/2812/2813)
*
* Copyright (C) 2003-2020 Sébastien Helleu <flashcode@flashtux.org>
* Copyright (C) 2006 Emmanuel Bouthenot <kolter@openics.org>
* Copyright (C) 2014 Shawn Smith <ShawnSmith0828@gmail.com>
*
* This file is part of WeeChat, the extensible chat client.
*
* WeeChat is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* WeeChat is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with WeeChat. If not, see <https://www.gnu.org/licenses/>.
*/
/* this define is needed for strptime() (not on OpenBSD/Sun) */
#if !defined(__OpenBSD__) && !defined(__sun)
#define _XOPEN_SOURCE 700
#endif
#ifndef __USE_XOPEN
#define __USE_XOPEN
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <sys/time.h>
#include <time.h>
#include "../weechat-plugin.h"
#include "irc.h"
#include "irc-protocol.h"
#include "irc-bar-item.h"
#include "irc-buffer.h"
#include "irc-channel.h"
#include "irc-color.h"
#include "irc-command.h"
#include "irc-config.h"
#include "irc-ctcp.h"
#include "irc-ignore.h"
#include "irc-message.h"
#include "irc-mode.h"
#include "irc-modelist.h"
#include "irc-msgbuffer.h"
#include "irc-nick.h"
#include "irc-sasl.h"
#include "irc-server.h"
#include "irc-notify.h"
/*
* Checks if a command is numeric.
*
* Returns:
* 1: all chars are numeric
* 0: command has other chars (not numeric)
*/
/*
 * Checks if a command is numeric (all chars are digits).
 *
 * Returns 1 if every character is a digit (also for NULL or empty
 * string), 0 as soon as a non-digit character is found.
 */
int
irc_protocol_is_numeric_command (const char *str)
{
    const char *ptr;

    for (ptr = str; ptr && ptr[0]; ptr++)
    {
        if (!isdigit ((unsigned char)ptr[0]))
            return 0;
    }
    return 1;
}
/*
* Gets log level for IRC command.
*/
/*
 * Gets log level for IRC command:
 *   0: NULL or empty command
 *   1: privmsg, notice
 *   2: nick
 *   4: join, part, quit, nick_back
 *   3: all other commands
 */
int
irc_protocol_log_level_for_command (const char *command)
{
    int i;
    static const char *level_1_commands[] = { "privmsg", "notice", NULL };
    static const char *level_4_commands[] = { "join", "part", "quit",
                                              "nick_back", NULL };

    if (!command || !command[0])
        return 0;

    for (i = 0; level_1_commands[i]; i++)
    {
        if (strcmp (command, level_1_commands[i]) == 0)
            return 1;
    }

    if (strcmp (command, "nick") == 0)
        return 2;

    for (i = 0; level_4_commands[i]; i++)
    {
        if (strcmp (command, level_4_commands[i]) == 0)
            return 4;
    }

    return 3;
}
/*
* Builds tags list with IRC command and optional tags and nick.
*/
/*
 * Builds the tags string for a printed message: "irc_<command>" plus
 * optional extra tags, "nick_<nick>", "host_<address>" and a
 * ",log<N>" suffix derived from the command's log level.
 *
 * Returns a pointer to a static buffer (overwritten on each call),
 * or NULL if command, tags and nick are all NULL.
 */
const char *
irc_protocol_tags (const char *command, const char *tags, const char *nick,
                   const char *address)
{
    static char string[1024];
    char str_log_level[32];
    int level;

    /* nothing to build? */
    if (!command && !tags && !nick)
        return NULL;

    str_log_level[0] = '\0';

    if (command && command[0])
    {
        level = irc_protocol_log_level_for_command (command);
        if (level > 0)
        {
            snprintf (str_log_level, sizeof (str_log_level),
                      ",log%d", level);
        }
    }

    snprintf (string, sizeof (string),
              "%s%s%s%s%s%s%s%s%s",
              (command && command[0]) ? "irc_" : "",
              (command && command[0]) ? command : "",
              (tags && tags[0]) ? "," : "",
              (tags && tags[0]) ? tags : "",
              (nick && nick[0]) ? ",nick_" : "",
              (nick && nick[0]) ? nick : "",
              (address && address[0]) ? ",host_" : "",
              (address && address[0]) ? address : "",
              str_log_level);

    return string;
}
/*
* Builds a string with nick and optional address.
*
* If server_message is 1, the nick is colored according to option
* irc.look.color_nicks_in_server_messages.
*
* Argument nickname is mandatory, address can be NULL.
* If nickname and address are NULL, an empty string is returned.
*/
/*
 * Builds a string with nick and optional address.
 *
 * If server_message is 1, the nick is colored according to option
 * irc.look.color_nicks_in_server_messages.
 *
 * Returns a pointer to a static buffer (overwritten on each call);
 * the buffer is empty if nickname is NULL.
 */
const char *
irc_protocol_nick_address (struct t_irc_server *server,
                           int server_message,
                           struct t_irc_nick *nick,
                           const char *nickname,
                           const char *address)
{
    static char string[1024];

    string[0] = '\0';

    if (!nickname)
        return string;

    if (address && (strcmp (nickname, address) != 0))
    {
        /* nick and address differ: display "nick (address)" */
        snprintf (string, sizeof (string),
                  "%s%s %s(%s%s%s)%s",
                  irc_nick_color_for_msg (server, server_message, nick,
                                          nickname),
                  nickname,
                  IRC_COLOR_CHAT_DELIMITERS,
                  IRC_COLOR_CHAT_HOST,
                  address,
                  IRC_COLOR_CHAT_DELIMITERS,
                  IRC_COLOR_RESET);
    }
    else
    {
        /* no address, or nick == address: display only the nick */
        snprintf (string, sizeof (string),
                  "%s%s%s",
                  irc_nick_color_for_msg (server, server_message, nick,
                                          nickname),
                  nickname,
                  IRC_COLOR_RESET);
    }

    return string;
}
/*
* Callback for the IRC message "ACCOUNT": account info about a nick
* (with capability "account-notify").
*
* Message looks like:
* :nick!user@host ACCOUNT *
* :nick!user@host ACCOUNT accountname
*/
/*
 * Updates the account name stored for "nick" on every channel of the
 * server; "*" as argument means the user logged out of their account.
 */
IRC_PROTOCOL_CALLBACK(account)
{
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;
    char *pos_account;
    int cap_account_notify;

    IRC_PROTOCOL_MIN_ARGS(3);

    /* "*" means "logged out": store no account name */
    pos_account = (strcmp (argv[2], "*") != 0) ? argv[2] : NULL;

    cap_account_notify = weechat_hashtable_has_key (server->cap_list,
                                                    "account-notify");

    /*
     * NOTE(review): "nick" comes from the message prefix and may be NULL
     * on a malformed message with no prefix — presumably irc_nick_search
     * handles a NULL nick; confirm, since this file is sensitive to
     * NULL-pointer crashes from crafted server messages.
     */
    for (ptr_channel = server->channels; ptr_channel;
         ptr_channel = ptr_channel->next_channel)
    {
        ptr_nick = irc_nick_search (server, ptr_channel, nick);
        if (ptr_nick)
        {
            if (ptr_nick->account)
                free (ptr_nick->account);
            /* keep the account only if the capability is enabled */
            ptr_nick->account = (cap_account_notify && pos_account) ?
                strdup (pos_account) : NULL;
        }
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "AUTHENTICATE".
*
* Message looks like:
* AUTHENTICATE +
* AUTHENTICATE QQDaUzXAmVffxuzFy77XWBGwABBQAgdinelBrKZaR3wE7nsIETuTVY=
*/
/*
 * Answers the server's AUTHENTICATE challenge using the SASL mechanism
 * configured for the server; on failure to build an answer, aborts the
 * capability negotiation with "CAP END".
 */
IRC_PROTOCOL_CALLBACK(authenticate)
{
    int sasl_mechanism;
    char *sasl_username, *sasl_password, *answer;
    const char *sasl_key;

    IRC_PROTOCOL_MIN_ARGS(2);

    if (irc_server_sasl_enabled (server))
    {
        sasl_mechanism = IRC_SERVER_OPTION_INTEGER(
            server, IRC_SERVER_OPTION_SASL_MECHANISM);
        /* username/password options may contain ${...} expressions */
        sasl_username = irc_server_eval_expression (
            server,
            IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_USERNAME));
        sasl_password = irc_server_eval_expression (
            server,
            IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_PASSWORD));
        sasl_key = IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_SASL_KEY);
        answer = NULL;
        /* argv[1] carries the server challenge ("+" or base64 data) */
        switch (sasl_mechanism)
        {
            case IRC_SASL_MECHANISM_PLAIN:
                answer = irc_sasl_mechanism_plain (sasl_username,
                                                   sasl_password);
                break;
            case IRC_SASL_MECHANISM_ECDSA_NIST256P_CHALLENGE:
                answer = irc_sasl_mechanism_ecdsa_nist256p_challenge (
                    server, argv[1], sasl_username, sasl_key);
                break;
            case IRC_SASL_MECHANISM_EXTERNAL:
                /* identity is taken from the TLS client certificate */
                answer = strdup ("+");
                break;
            case IRC_SASL_MECHANISM_DH_BLOWFISH:
                answer = irc_sasl_mechanism_dh_blowfish (
                    argv[1], sasl_username, sasl_password);
                break;
            case IRC_SASL_MECHANISM_DH_AES:
                answer = irc_sasl_mechanism_dh_aes (
                    argv[1], sasl_username, sasl_password);
                break;
        }
        if (answer)
        {
            irc_server_sendf (server, 0, NULL, "AUTHENTICATE %s", answer);
            free (answer);
        }
        else
        {
            /* no answer could be built: give up on SASL */
            weechat_printf (
                server->buffer,
                _("%s%s: error building answer for SASL authentication, "
                  "using mechanism \"%s\""),
                weechat_prefix ("error"), IRC_PLUGIN_NAME,
                irc_sasl_mechanism_string[IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_SASL_MECHANISM)]);
            irc_server_sendf (server, 0, NULL, "CAP END");
        }
        if (sasl_username)
            free (sasl_username);
        if (sasl_password)
            free (sasl_password);
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "AWAY": away info about a nick (with capability
* "away-notify").
*
* Message looks like:
* :nick!user@host AWAY
* :nick!user@host AWAY :I am away
*/
/*
 * Updates the away status of "nick" on every channel of the server;
 * the nick is away when an away message argument is present.
 */
IRC_PROTOCOL_CALLBACK(away)
{
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;
    int is_away;

    IRC_PROTOCOL_MIN_ARGS(2);

    /* a third argument (the away message) means the nick is now away */
    is_away = (argc > 2);

    for (ptr_channel = server->channels; ptr_channel;
         ptr_channel = ptr_channel->next_channel)
    {
        ptr_nick = irc_nick_search (server, ptr_channel, nick);
        if (ptr_nick)
            irc_nick_set_away (server, ptr_channel, ptr_nick, is_away);
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for IRC server capabilities string hashtable map.
*/
/*
 * Hashtable map callback: appends one capability ("name" or
 * "name=value") to the dynamic string passed via "data", separating
 * entries with spaces.
 */
void
irc_protocol_cap_print_cb (void *data,
                           struct t_hashtable *hashtable,
                           const char *key, const char *value)
{
    char **result;

    /* make C compiler happy */
    (void) hashtable;

    result = (char **)data;

    /* add a separator before every entry except the first */
    if (*result[0])
        weechat_string_dyn_concat (result, " ");

    weechat_string_dyn_concat (result, key);

    /* append "=value" when the capability carries a value */
    if (value)
    {
        weechat_string_dyn_concat (result, "=");
        weechat_string_dyn_concat (result, value);
    }
}
/*
* Synchronizes requested capabilities for IRC server.
*/
/*
 * Synchronizes requested capabilities for IRC server: requests (via
 * "CAP REQ") every capability listed in the server option
 * "capabilities" (plus "sasl" if requested) that the server supports
 * and that is not already enabled.  When "sasl" is 1 and SASL cannot
 * be negotiated, ends the CAP handshake and optionally disconnects
 * according to the "sasl_fail" option.
 */
void
irc_protocol_cap_sync (struct t_irc_server *server, int sasl)
{
    char *cap_option, *cap_req, **caps_requested;
    const char *ptr_cap_option;
    int sasl_requested, sasl_to_do, sasl_fail;
    int i, length, num_caps_requested;

    sasl_requested = (sasl) ? irc_server_sasl_enabled (server) : 0;
    sasl_to_do = 0;

    ptr_cap_option = IRC_SERVER_OPTION_STRING(
        server,
        IRC_SERVER_OPTION_CAPABILITIES);

    /* +16 leaves room for the ",sasl" suffix and separators */
    length = ((ptr_cap_option && ptr_cap_option[0]) ?
              strlen (ptr_cap_option) : 0) + 16;
    cap_option = malloc (length);
    cap_req = malloc (length);
    if (cap_option && cap_req)
    {
        /* build the comma-separated list of capabilities to consider */
        cap_option[0] = '\0';
        if (ptr_cap_option && ptr_cap_option[0])
            strcat (cap_option, ptr_cap_option);
        if (sasl && sasl_requested)
        {
            if (cap_option[0])
                strcat (cap_option, ",");
            strcat (cap_option, "sasl");
        }
        cap_req[0] = '\0';
        caps_requested = weechat_string_split (
            cap_option,
            ",",
            NULL,
            WEECHAT_STRING_SPLIT_STRIP_LEFT
            | WEECHAT_STRING_SPLIT_STRIP_RIGHT
            | WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
            0,
            &num_caps_requested);
        if (caps_requested)
        {
            /* request only caps the server supports and we lack */
            for (i = 0; i < num_caps_requested; i++)
            {
                if (weechat_hashtable_has_key (server->cap_ls,
                                               caps_requested[i]) &&
                    !weechat_hashtable_has_key (server->cap_list,
                                                caps_requested[i]))
                {
                    if (sasl && strcmp (caps_requested[i], "sasl") == 0)
                        sasl_to_do = 1;
                    if (cap_req[0])
                        strcat (cap_req, " ");
                    strcat (cap_req, caps_requested[i]);
                }
            }
            weechat_string_free_split (caps_requested);
        }
        if (cap_req[0])
        {
            weechat_printf (
                server->buffer,
                _("%s%s: client capability, requesting: %s"),
                weechat_prefix ("network"), IRC_PLUGIN_NAME,
                cap_req);
            irc_server_sendf (server, 0, NULL,
                              "CAP REQ :%s", cap_req);
        }
        if (sasl)
        {
            /* no SASL negotiation pending: end the CAP handshake now */
            if (!sasl_to_do)
                irc_server_sendf (server, 0, NULL, "CAP END");
            if (sasl_requested && !sasl_to_do)
            {
                weechat_printf (
                    server->buffer,
                    _("%s%s: client capability: SASL not supported"),
                    weechat_prefix ("network"), IRC_PLUGIN_NAME);

                if (weechat_config_boolean (irc_config_network_sasl_fail_unavailable))
                {
                    /* same handling as for sasl_end_fail */
                    sasl_fail = IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_SASL_FAIL);
                    if ((sasl_fail == IRC_SERVER_SASL_FAIL_RECONNECT)
                        || (sasl_fail == IRC_SERVER_SASL_FAIL_DISCONNECT))
                    {
                        irc_server_disconnect (
                            server, 0,
                            (sasl_fail == IRC_SERVER_SASL_FAIL_RECONNECT) ? 1 : 0);
                    }
                }
            }
        }
    }
    if (cap_option)
        free (cap_option);
    if (cap_req)
        free (cap_req);
}
/*
* Callback for the IRC message "CAP": client capability.
*
* Message looks like:
* :server CAP * LS :identify-msg multi-prefix sasl
* :server CAP * ACK :sasl
* :server CAP * NAK :sasl
*/
/*
 * Handles the CAP subcommands sent by the server:
 *   LS   - capabilities the server supports (multi-line when arg is "*")
 *   LIST - capabilities currently enabled (multi-line when arg is "*")
 *   ACK  - capabilities accepted (or disabled with "-"); may start SASL
 *   NAK  - capabilities the server refused
 *   NEW  - capabilities that became available (cap-notify)
 *   DEL  - capabilities that were removed (cap-notify)
 *
 * Fix: the "AUTHENTICATE <mech>" message is now sent through a "%s"
 * format instead of being passed directly as the format string of
 * irc_server_sendf() (format-string hygiene; the buffer content should
 * never be interpreted as conversion specifiers).
 */
IRC_PROTOCOL_CALLBACK(cap)
{
    char *ptr_caps, **caps_supported, **caps_added, **caps_removed;
    char **caps_enabled, *pos_value, *str_name, **str_caps;
    char str_msg_auth[512], **str_caps_enabled, **str_caps_disabled;
    int num_caps_supported, num_caps_added, num_caps_removed;
    int num_caps_enabled, sasl_to_do, sasl_mechanism;
    int i, timeout, last_reply;

    IRC_PROTOCOL_MIN_ARGS(4);

    if (strcmp (argv[3], "LS") == 0)
    {
        if (argc > 4)
        {
            /* "CAP * LS * :caps" means more LS replies will follow */
            if (argc > 5 && (strcmp (argv[4], "*") == 0))
            {
                ptr_caps = argv_eol[5];
                last_reply = 0;
            }
            else
            {
                ptr_caps = argv_eol[4];
                last_reply = 1;
            }
            /* first reply of a new LS batch: reset the stored caps */
            if (!server->checking_cap_ls)
            {
                weechat_hashtable_remove_all (server->cap_ls);
                server->checking_cap_ls = 1;
            }
            if (last_reply)
                server->checking_cap_ls = 0;
            if (ptr_caps[0] == ':')
                ptr_caps++;
            caps_supported = weechat_string_split (
                ptr_caps,
                " ",
                NULL,
                WEECHAT_STRING_SPLIT_STRIP_LEFT
                | WEECHAT_STRING_SPLIT_STRIP_RIGHT
                | WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
                0,
                &num_caps_supported);
            if (caps_supported)
            {
                /* each capability is "name" or "name=value" */
                for (i = 0; i < num_caps_supported; i++)
                {
                    pos_value = strstr (caps_supported[i], "=");
                    if (pos_value)
                    {
                        str_name = strndup (caps_supported[i],
                                            pos_value - caps_supported[i]);
                        if (str_name)
                        {
                            weechat_hashtable_set (server->cap_ls,
                                                   str_name, pos_value + 1);
                            free (str_name);
                        }
                    }
                    else
                    {
                        weechat_hashtable_set (server->cap_ls,
                                               caps_supported[i], NULL);
                    }
                }
            }
            if (last_reply)
            {
                str_caps = weechat_string_dyn_alloc (128);
                weechat_hashtable_map_string (server->cap_ls,
                                              irc_protocol_cap_print_cb,
                                              str_caps);
                weechat_printf_date_tags (
                    server->buffer, date, NULL,
                    _("%s%s: client capability, server supports: %s"),
                    weechat_prefix ("network"),
                    IRC_PLUGIN_NAME,
                    *str_caps);
                weechat_string_dyn_free (str_caps, 1);
            }
            /* auto-enable capabilities only when connecting to server */
            if (last_reply && !server->is_connected)
                irc_protocol_cap_sync (server, 1);
            if (caps_supported)
                weechat_string_free_split (caps_supported);
        }
    }
    else if (strcmp (argv[3], "LIST") == 0)
    {
        if (argc > 4)
        {
            /* "CAP * LIST * :caps" means more LIST replies will follow */
            if (argc > 5 && (strcmp (argv[4], "*") == 0))
            {
                ptr_caps = argv_eol[5];
                last_reply = 0;
            }
            else
            {
                ptr_caps = argv_eol[4];
                last_reply = 1;
            }
            if (!server->checking_cap_list)
            {
                weechat_hashtable_remove_all (server->cap_list);
                server->checking_cap_list = 1;
            }
            if (last_reply)
                server->checking_cap_list = 0;
            if (ptr_caps[0] == ':')
                ptr_caps++;
            caps_enabled = weechat_string_split (
                ptr_caps,
                " ",
                NULL,
                WEECHAT_STRING_SPLIT_STRIP_LEFT
                | WEECHAT_STRING_SPLIT_STRIP_RIGHT
                | WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
                0,
                &num_caps_enabled);
            if (caps_enabled)
            {
                for (i = 0; i < num_caps_enabled; i++)
                {
                    pos_value = strstr (caps_enabled[i], "=");
                    if (pos_value)
                    {
                        str_name = strndup (caps_enabled[i],
                                            pos_value - caps_enabled[i]);
                        if (str_name)
                        {
                            weechat_hashtable_set (server->cap_list,
                                                   str_name, pos_value + 1);
                            free (str_name);
                        }
                    }
                    else
                    {
                        weechat_hashtable_set (server->cap_list,
                                               caps_enabled[i], NULL);
                    }
                }
            }
            if (last_reply)
            {
                str_caps = weechat_string_dyn_alloc (128);
                weechat_hashtable_map_string (server->cap_list,
                                              irc_protocol_cap_print_cb,
                                              str_caps);
                weechat_printf_date_tags (
                    server->buffer, date, NULL,
                    _("%s%s: client capability, currently enabled: %s"),
                    weechat_prefix ("network"),
                    IRC_PLUGIN_NAME,
                    *str_caps);
                weechat_string_dyn_free (str_caps, 1);
            }
            if (caps_enabled)
                weechat_string_free_split (caps_enabled);
        }
    }
    else if (strcmp (argv[3], "ACK") == 0)
    {
        if (argc > 4)
        {
            ptr_caps = (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4];
            sasl_to_do = 0;
            str_caps_enabled = weechat_string_dyn_alloc (128);
            str_caps_disabled = weechat_string_dyn_alloc (128);

            caps_supported = weechat_string_split (
                ptr_caps,
                " ",
                NULL,
                WEECHAT_STRING_SPLIT_STRIP_LEFT
                | WEECHAT_STRING_SPLIT_STRIP_RIGHT
                | WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
                0,
                &num_caps_supported);
            if (caps_supported)
            {
                for (i = 0; i < num_caps_supported; i++)
                {
                    /* a leading "-" means the capability was disabled */
                    if (caps_supported[i][0] == '-')
                    {
                        if (*str_caps_disabled[0])
                            weechat_string_dyn_concat (str_caps_disabled, " ");
                        weechat_string_dyn_concat (str_caps_disabled,
                                                   caps_supported[i] + 1);

                        weechat_hashtable_remove (server->cap_list,
                                                  caps_supported[i] + 1);
                    }
                    else
                    {
                        if (*str_caps_enabled[0])
                            weechat_string_dyn_concat (str_caps_enabled, " ");
                        weechat_string_dyn_concat (str_caps_enabled,
                                                   caps_supported[i]);

                        weechat_hashtable_set (server->cap_list,
                                               caps_supported[i], NULL);

                        if (strcmp (caps_supported[i], "sasl") == 0)
                            sasl_to_do = 1;
                    }
                }
                weechat_string_free_split (caps_supported);
            }
            if (*str_caps_enabled[0] && *str_caps_disabled[0])
            {
                weechat_printf_date_tags (
                    server->buffer, date, NULL,
                    _("%s%s: client capability, enabled: %s, disabled: %s"),
                    weechat_prefix ("network"), IRC_PLUGIN_NAME,
                    *str_caps_enabled, *str_caps_disabled);
            }
            else if (*str_caps_enabled[0])
            {
                weechat_printf_date_tags (
                    server->buffer, date, NULL,
                    _("%s%s: client capability, enabled: %s"),
                    weechat_prefix ("network"), IRC_PLUGIN_NAME,
                    *str_caps_enabled);
            }
            else if (*str_caps_disabled[0])
            {
                weechat_printf_date_tags (
                    server->buffer, date, NULL,
                    _("%s%s: client capability, disabled: %s"),
                    weechat_prefix ("network"), IRC_PLUGIN_NAME,
                    *str_caps_disabled);
            }
            weechat_string_dyn_free (str_caps_enabled, 1);
            weechat_string_dyn_free (str_caps_disabled, 1);

            if (sasl_to_do)
            {
                sasl_mechanism = IRC_SERVER_OPTION_INTEGER(
                    server, IRC_SERVER_OPTION_SASL_MECHANISM);
                if ((sasl_mechanism >= 0)
                    && (sasl_mechanism < IRC_NUM_SASL_MECHANISMS))
                {
                    snprintf (str_msg_auth, sizeof (str_msg_auth),
                              "AUTHENTICATE %s",
                              irc_sasl_mechanism_string[sasl_mechanism]);
                    weechat_string_toupper (str_msg_auth);
                    /* send via "%s": never use a buffer as format string */
                    irc_server_sendf (server, 0, NULL, "%s", str_msg_auth);

                    /* (re-)arm the SASL timeout timer */
                    if (server->hook_timer_sasl)
                        weechat_unhook (server->hook_timer_sasl);
                    timeout = IRC_SERVER_OPTION_INTEGER(
                        server, IRC_SERVER_OPTION_SASL_TIMEOUT);
                    server->hook_timer_sasl = weechat_hook_timer (
                        timeout * 1000,
                        0, 1,
                        &irc_server_timer_sasl_cb,
                        server, NULL);
                }
            }
        }
    }
    else if (strcmp (argv[3], "NAK") == 0)
    {
        if (argc > 4)
        {
            ptr_caps = (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4];
            weechat_printf_date_tags (
                server->buffer, date, NULL,
                _("%s%s: client capability, refused: %s"),
                weechat_prefix ("error"), IRC_PLUGIN_NAME, ptr_caps);
            if (!server->is_connected)
                irc_server_sendf (server, 0, NULL, "CAP END");
        }
    }
    else if (strcmp (argv[3], "NEW") == 0)
    {
        if (argc > 4)
        {
            ptr_caps = (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4];
            weechat_printf_date_tags (
                server->buffer, date, NULL,
                _("%s%s: client capability, now available: %s"),
                weechat_prefix ("network"), IRC_PLUGIN_NAME, ptr_caps);
            caps_added = weechat_string_split (
                ptr_caps,
                " ",
                NULL,
                WEECHAT_STRING_SPLIT_STRIP_LEFT
                | WEECHAT_STRING_SPLIT_STRIP_RIGHT
                | WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
                0,
                &num_caps_added);
            if (caps_added)
            {
                for (i = 0; i < num_caps_added; i++)
                {
                    pos_value = strstr (caps_added[i], "=");
                    if (pos_value)
                    {
                        str_name = strndup (caps_added[i],
                                            pos_value - caps_added[i]);
                        if (str_name)
                        {
                            weechat_hashtable_set (server->cap_ls,
                                                   str_name, pos_value + 1);
                            free (str_name);
                        }
                    }
                    else
                    {
                        weechat_hashtable_set (server->cap_ls,
                                               caps_added[i], NULL);
                    }
                }
                weechat_string_free_split (caps_added);
            }

            /* TODO: SASL Reauthentication */
            irc_protocol_cap_sync (server, 0);
        }
    }
    else if (strcmp (argv[3], "DEL") == 0)
    {
        if (argc > 4)
        {
            ptr_caps = (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4];
            weechat_printf_date_tags (
                server->buffer, date, NULL,
                _("%s%s: client capability, removed: %s"),
                weechat_prefix ("network"), IRC_PLUGIN_NAME, ptr_caps);
            caps_removed = weechat_string_split (
                ptr_caps,
                " ",
                NULL,
                WEECHAT_STRING_SPLIT_STRIP_LEFT
                | WEECHAT_STRING_SPLIT_STRIP_RIGHT
                | WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
                0,
                &num_caps_removed);
            if (caps_removed)
            {
                for (i = 0; i < num_caps_removed; i++)
                {
                    weechat_hashtable_remove (server->cap_ls, caps_removed[i]);
                    weechat_hashtable_remove (server->cap_list, caps_removed[i]);
                }
                weechat_string_free_split (caps_removed);
            }
        }
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "CHGHOST": user/host change of a nick (with
* capability "chghost"):
* https://ircv3.net/specs/extensions/chghost-3.2.html
*
* Message looks like:
* :nick!user@host CHGHOST user new.host.goes.here
* :nick!user@host CHGHOST newuser host
* :nick!user@host CHGHOST newuser new.host.goes.here
* :nick!user@host CHGHOST newuser :new.host.goes.here
*/
/*
 * Applies a CHGHOST message: builds the new "user@host" string,
 * updates the server's own host if the change is for us, then updates
 * (and optionally displays) the change on every channel where the
 * nick is present.
 */
IRC_PROTOCOL_CALLBACK(chghost)
{
    int length, local_chghost, smart_filter;
    char *str_host, *pos_new_host;
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;
    struct t_irc_channel_speaking *ptr_nick_speaking;

    IRC_PROTOCOL_MIN_ARGS(4);
    IRC_PROTOCOL_CHECK_HOST;

    /* is it our own user/host that changed? */
    local_chghost = (irc_server_strcasecmp (server, nick, server->nick) == 0);

    pos_new_host = (argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3];

    /* build "newuser@newhost" (argv[2] is the new user part) */
    length = strlen (argv[2]) + 1 + strlen (pos_new_host) + 1;
    str_host = malloc (length);
    if (!str_host)
    {
        weechat_printf (
            server->buffer,
            _("%s%s: not enough memory for \"%s\" command"),
            weechat_prefix ("error"), IRC_PLUGIN_NAME, "chghost");
        return WEECHAT_RC_OK;
    }
    snprintf (str_host, length, "%s@%s", argv[2], pos_new_host);

    if (local_chghost)
        irc_server_set_host (server, str_host);

    for (ptr_channel = server->channels; ptr_channel;
         ptr_channel = ptr_channel->next_channel)
    {
        ptr_nick = irc_nick_search (server, ptr_channel, nick);
        if (ptr_nick)
        {
            if (!ignored)
            {
                ptr_nick_speaking = ((weechat_config_boolean (irc_config_look_smart_filter))
                                     && (weechat_config_boolean (irc_config_look_smart_filter_chghost))) ?
                    irc_channel_nick_speaking_time_search (server, ptr_channel, nick, 1) : NULL;
                /* hide the message when the nick was not speaking recently */
                smart_filter = (!local_chghost
                                && weechat_config_boolean (irc_config_look_smart_filter)
                                && weechat_config_boolean (irc_config_look_smart_filter_chghost)
                                && !ptr_nick_speaking);
                weechat_printf_date_tags (
                    irc_msgbuffer_get_target_buffer (
                        server, NULL, command, NULL, ptr_channel->buffer),
                    date,
                    irc_protocol_tags (
                        command,
                        smart_filter ? "irc_smart_filter" : NULL,
                        nick, address),
                    _("%s%s%s%s (%s%s%s)%s has changed host to %s%s"),
                    weechat_prefix ("network"),
                    irc_nick_color_for_msg (server, 1, ptr_nick, nick),
                    nick,
                    IRC_COLOR_CHAT_DELIMITERS,
                    IRC_COLOR_CHAT_HOST,
                    (address) ? address : "",
                    IRC_COLOR_CHAT_DELIMITERS,
                    IRC_COLOR_MESSAGE_CHGHOST,
                    IRC_COLOR_CHAT_HOST,
                    str_host);
            }
            irc_nick_set_host (ptr_nick, str_host);
        }
    }

    free (str_host);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "ERROR".
*
* Message looks like:
* ERROR :Closing Link: irc.server.org (Bad Password)
*/
/*
 * Displays an ERROR message from the server; if the server announces
 * it is closing the link, disconnects on our side too.
 */
IRC_PROTOCOL_CALLBACK(error)
{
    char *ptr_error_msg;
    struct t_gui_buffer *ptr_buffer;

    IRC_PROTOCOL_MIN_ARGS(2);

    /* skip the leading ':' of the trailing parameter, if any */
    ptr_error_msg = argv_eol[1];
    if (ptr_error_msg[0] == ':')
        ptr_error_msg++;

    ptr_buffer = irc_msgbuffer_get_target_buffer (server, NULL, command,
                                                  NULL, NULL);
    weechat_printf_date_tags (
        ptr_buffer,
        date,
        irc_protocol_tags (command, NULL, NULL, NULL),
        "%s%s",
        weechat_prefix ("error"),
        ptr_error_msg);

    /* the server closes the link: disconnect without waiting */
    if (strncmp (ptr_error_msg, "Closing Link", 12) == 0)
        irc_server_disconnect (server, !server->is_connected, 1);

    return WEECHAT_RC_OK;
}
/*
* Callback for an IRC error message (used by many error messages, but not for
* message "ERROR").
*
* Example of error:
* :server 404 nick #channel :Cannot send to channel
*/
/*
 * Displays a generic numeric error; tries to detect an optional
 * channel/nick argument before the error text and, when it names a
 * known channel, prints the message on that channel's buffer.
 */
IRC_PROTOCOL_CALLBACK(generic_error)
{
    int first_arg;
    char *chan_nick, *args;
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;

    IRC_PROTOCOL_MIN_ARGS(4);

    /* skip our own nick when it is echoed as first argument */
    first_arg = (irc_server_strcasecmp (server, argv[2], server->nick) == 0) ? 3 : 2;

    /* a non-trailing arg followed by more args is a channel/nick name */
    if ((argv[first_arg][0] != ':') && argv[first_arg + 1])
    {
        chan_nick = argv[first_arg];
        args = argv_eol[first_arg + 1];
    }
    else
    {
        chan_nick = NULL;
        args = argv_eol[first_arg];
    }
    if (args[0] == ':')
        args++;

    ptr_channel = NULL;
    if (chan_nick)
        ptr_channel = irc_channel_search (server, chan_nick);

    ptr_buffer = (ptr_channel) ? ptr_channel->buffer : server->buffer;

    /* 401/402 errors are whois-related: route them accordingly */
    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command,
            ((strcmp (command, "401") == 0)
             || (strcmp (command, "402") == 0)) ? "whois" : NULL,
            ptr_buffer),
        date,
        irc_protocol_tags (command, NULL, NULL, NULL),
        "%s%s%s%s%s%s",
        weechat_prefix ("network"),
        (ptr_channel && chan_nick
         && (irc_server_strcasecmp (server, chan_nick,
                                    ptr_channel->name) == 0)) ?
        IRC_COLOR_CHAT_CHANNEL : "",
        (chan_nick) ? chan_nick : "",
        IRC_COLOR_RESET,
        (chan_nick) ? ": " : "",
        args);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "INVITE".
*
* Message looks like:
* :nick!user@host INVITE mynick :#channel
*
* With invite-notify capability
* (https://ircv3.net/specs/extensions/invite-notify-3.2.html):
* :<inviter> INVITE <target> <channel>
* :ChanServ!ChanServ@example.com INVITE Attila #channel
*/
IRC_PROTOCOL_CALLBACK(invite)
{
    IRC_PROTOCOL_MIN_ARGS(4);
    IRC_PROTOCOL_CHECK_HOST;
    if (ignored)
        return WEECHAT_RC_OK;
    /* invite addressed to me: display with a highlight notification */
    if (irc_server_strcasecmp (server, argv[2], server->nick) == 0)
    {
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (server, nick, command, NULL, NULL),
            date,
            irc_protocol_tags (command, "notify_highlight", nick, address),
            _("%sYou have been invited to %s%s%s by %s%s%s"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_CHANNEL,
            (argv[3][0] == ':') ? argv[3] + 1 : argv[3],
            IRC_COLOR_RESET,
            irc_nick_color_for_msg (server, 1, NULL, nick),
            nick,
            IRC_COLOR_RESET);
    }
    else
    {
        /* CAP invite-notify */
        /* imitate numeric 341 output */
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (server, nick, command, NULL, NULL),
            date,
            irc_protocol_tags (command, NULL, nick, address),
            _("%s%s%s%s has invited %s%s%s to %s%s%s"),
            weechat_prefix ("network"),
            irc_nick_color_for_msg (server, 1, NULL, nick),
            nick,
            IRC_COLOR_RESET,
            irc_nick_color_for_msg (server, 1, NULL, argv[2]),
            argv[2],
            IRC_COLOR_RESET,
            IRC_COLOR_CHAT_CHANNEL,
            (argv[3][0] == ':') ? argv[3] + 1 : argv[3],
            IRC_COLOR_RESET);
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "JOIN".
*
* Message looks like:
* :nick!user@host JOIN :#channel
*
* With extended-join capability:
* :nick!user@host JOIN :#channel * :real name
* :nick!user@host JOIN :#channel account :real name
*/
IRC_PROTOCOL_CALLBACK(join)
{
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;
    struct t_irc_channel_speaking *ptr_nick_speaking;
    char *pos_channel, *pos_account, *pos_realname;
    char str_account[512], str_realname[512];
    int local_join, display_host, smart_filter;
    IRC_PROTOCOL_MIN_ARGS(3);
    IRC_PROTOCOL_CHECK_HOST;
    /* is this our own join? */
    local_join = (irc_server_strcasecmp (server, nick, server->nick) == 0);
    pos_channel = (argv[2][0] == ':') ? argv[2] + 1 : argv[2];
    /* extended-join capability: account name ("*" means "no account") */
    pos_account = ((argc > 3) && (strcmp (argv[3], "*") != 0)) ?
        argv[3] : NULL;
    /* extended-join capability: real name (trailing parameter) */
    pos_realname = (argc > 4) ?
        ((argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]) : NULL;
    str_account[0] = '\0';
    if (pos_account)
    {
        snprintf (str_account, sizeof (str_account),
                  "%s [%s%s%s]",
                  IRC_COLOR_CHAT_DELIMITERS,
                  IRC_COLOR_CHAT_HOST,
                  pos_account,
                  IRC_COLOR_CHAT_DELIMITERS);
    }
    str_realname[0] = '\0';
    if (pos_realname)
    {
        snprintf (str_realname, sizeof (str_realname),
                  "%s (%s%s%s)",
                  IRC_COLOR_CHAT_DELIMITERS,
                  IRC_COLOR_CHAT_HOST,
                  pos_realname,
                  IRC_COLOR_CHAT_DELIMITERS);
    }
    ptr_channel = irc_channel_search (server, pos_channel);
    if (ptr_channel)
    {
        ptr_channel->part = 0;
    }
    else
    {
        /*
         * if someone else joins and channel is not opened, then just
         * ignore it (we should receive our self join first)
         */
        if (!local_join)
            return WEECHAT_RC_OK;
        ptr_channel = irc_channel_new (server, IRC_CHANNEL_TYPE_CHANNEL,
                                       pos_channel, 1, 1);
        if (!ptr_channel)
        {
            weechat_printf (server->buffer,
                            _("%s%s: cannot create new channel \"%s\""),
                            weechat_prefix ("error"), IRC_PLUGIN_NAME,
                            pos_channel);
            return WEECHAT_RC_ERROR;
        }
    }
    /*
     * local join? clear nicklist to be sure it is empty (when using znc, after
     * reconnection to network, we receive a JOIN for channel with existing
     * nicks in irc plugin, so we need to clear the nicklist now)
     */
    if (local_join)
        irc_nick_free_all (server, ptr_channel);
    /* reset some variables if joining new channel */
    if (!ptr_channel->nicks)
    {
        irc_channel_set_topic (ptr_channel, NULL);
        if (ptr_channel->modes)
        {
            free (ptr_channel->modes);
            ptr_channel->modes = NULL;
        }
        ptr_channel->limit = 0;
        weechat_hashtable_remove_all (ptr_channel->join_msg_received);
        ptr_channel->checking_whox = 0;
    }
    /* add nick in channel */
    ptr_nick = irc_nick_new (server, ptr_channel, nick, address, NULL, 0,
                             (pos_account) ? pos_account : NULL,
                             (pos_realname) ? pos_realname : NULL);
    /* rename the nick if it was in list with a different case */
    irc_channel_nick_speaking_rename_if_present (server, ptr_channel, nick);
    if (!ignored)
    {
        /* lookup recent speaking time only if smart filtering could apply */
        ptr_nick_speaking = ((weechat_config_boolean (irc_config_look_smart_filter))
                             && (weechat_config_boolean (irc_config_look_smart_filter_join))) ?
            irc_channel_nick_speaking_time_search (server, ptr_channel, nick, 1) : NULL;
        display_host = (local_join) ?
            weechat_config_boolean (irc_config_look_display_host_join_local) :
            weechat_config_boolean (irc_config_look_display_host_join);
        /*
         * "smart" filter the join message is it's not a join from myself, if
         * smart filtering is enabled, and if nick was not speaking in channel
         */
        smart_filter = (!local_join
                        && weechat_config_boolean (irc_config_look_smart_filter)
                        && weechat_config_boolean (irc_config_look_smart_filter_join)
                        && !ptr_nick_speaking);
        /* display the join */
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (server, NULL, command, NULL,
                                             ptr_channel->buffer),
            date,
            irc_protocol_tags (command,
                               smart_filter ? "irc_smart_filter" : NULL,
                               nick, address),
            _("%s%s%s%s%s%s%s%s%s%s%s%s has joined %s%s%s"),
            weechat_prefix ("join"),
            irc_nick_color_for_msg (server, 1, ptr_nick, nick),
            nick,
            str_account,
            str_realname,
            IRC_COLOR_CHAT_DELIMITERS,
            (display_host) ? " (" : "",
            IRC_COLOR_CHAT_HOST,
            (display_host) ? address : "",
            IRC_COLOR_CHAT_DELIMITERS,
            (display_host) ? ")" : "",
            IRC_COLOR_MESSAGE_JOIN,
            IRC_COLOR_CHAT_CHANNEL,
            pos_channel,
            IRC_COLOR_MESSAGE_JOIN);
        /*
         * if join is smart filtered, save the nick in hashtable, and if nick
         * is speaking shortly after the join, it will be unmasked
         * (option irc.look.smart_filter_join_unmask)
         */
        if (smart_filter)
        {
            irc_channel_join_smart_filtered_add (ptr_channel, nick,
                                                 time (NULL));
        }
        /* display message in private if private has flag "has_quit_server" */
        if (!local_join)
        {
            irc_channel_display_nick_back_in_pv (server, ptr_nick, nick);
            irc_channel_set_topic_private_buffers (server, ptr_nick, nick,
                                                   address);
        }
    }
    if (local_join)
    {
        /* remember our own host and refresh the channel bar item */
        irc_server_set_host (server, address);
        irc_bar_item_update_channel ();
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "KICK".
*
* Message looks like:
* :nick1!user@host KICK #channel nick2 :kick reason
*/
IRC_PROTOCOL_CALLBACK(kick)
{
    char *pos_comment;
    const char *ptr_autorejoin;
    int rejoin;
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick, *ptr_nick_kicked;
    IRC_PROTOCOL_MIN_ARGS(4);
    IRC_PROTOCOL_CHECK_HOST;
    /* optional kick reason (trailing parameter) */
    pos_comment = (argc > 4) ?
        ((argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]) : NULL;
    ptr_channel = irc_channel_search (server, argv[2]);
    if (!ptr_channel)
        return WEECHAT_RC_OK;
    /* kicker (nick) and kicked user (argv[3]) in channel nicklist */
    ptr_nick = irc_nick_search (server, ptr_channel, nick);
    ptr_nick_kicked = irc_nick_search (server, ptr_channel, argv[3]);
    if (pos_comment)
    {
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (server, NULL, command, NULL,
                                             ptr_channel->buffer),
            date,
            irc_protocol_tags (command, NULL, NULL, address),
            _("%s%s%s%s has kicked %s%s%s %s(%s%s%s)"),
            weechat_prefix ("quit"),
            irc_nick_color_for_msg (server, 1, ptr_nick, nick),
            nick,
            IRC_COLOR_MESSAGE_KICK,
            irc_nick_color_for_msg (server, 1, ptr_nick_kicked, argv[3]),
            argv[3],
            IRC_COLOR_MESSAGE_KICK,
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_REASON_KICK,
            pos_comment,
            IRC_COLOR_CHAT_DELIMITERS);
    }
    else
    {
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (server, NULL, command, NULL,
                                             ptr_channel->buffer),
            date,
            irc_protocol_tags (command, NULL, NULL, address),
            _("%s%s%s%s has kicked %s%s%s"),
            weechat_prefix ("quit"),
            irc_nick_color_for_msg (server, 1, ptr_nick, nick),
            nick,
            IRC_COLOR_MESSAGE_KICK,
            irc_nick_color_for_msg (server, 1, ptr_nick_kicked, argv[3]),
            argv[3],
            IRC_COLOR_MESSAGE_KICK);
    }
    if (irc_server_strcasecmp (server, argv[3], server->nick) == 0)
    {
        /*
         * my nick was kicked => free all nicks, channel is not active any
         * more
         */
        irc_nick_free_all (server, ptr_channel);
        irc_channel_modelist_set_state (ptr_channel,
                                        IRC_MODELIST_STATE_MODIFIED);
        /* read option "autorejoin" in server */
        rejoin = IRC_SERVER_OPTION_BOOLEAN(server, IRC_SERVER_OPTION_AUTOREJOIN);
        /*
         * if buffer has a local variable "autorejoin", use it
         * (it has higher priority than server option
         */
        ptr_autorejoin = weechat_buffer_get_string (ptr_channel->buffer,
                                                    "localvar_autorejoin");
        if (ptr_autorejoin)
            rejoin = weechat_config_string_to_boolean (ptr_autorejoin);
        if (rejoin)
        {
            if (IRC_SERVER_OPTION_INTEGER(server,
                                          IRC_SERVER_OPTION_AUTOREJOIN_DELAY) == 0)
            {
                /* immediately rejoin if delay is 0 */
                irc_channel_rejoin (server, ptr_channel);
            }
            else
            {
                /* rejoin channel later, according to delay */
                ptr_channel->hook_autorejoin =
                    weechat_hook_timer (
                        IRC_SERVER_OPTION_INTEGER(server,
                                                  IRC_SERVER_OPTION_AUTOREJOIN_DELAY) * 1000,
                        0, 1,
                        &irc_channel_autorejoin_cb,
                        ptr_channel, NULL);
            }
        }
        irc_bar_item_update_channel ();
    }
    else
    {
        /*
         * someone was kicked from channel (but not me) => remove only this
         * nick
         */
        if (ptr_nick_kicked)
            irc_nick_free (server, ptr_channel, ptr_nick_kicked);
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "KILL".
*
* Message looks like:
* :nick1!user@host KILL mynick :kill reason
*/
IRC_PROTOCOL_CALLBACK(kill)
{
    char *pos_comment;
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick, *ptr_nick_killed;
    IRC_PROTOCOL_MIN_ARGS(3);
    IRC_PROTOCOL_CHECK_HOST;
    /* optional kill reason (trailing parameter) */
    pos_comment = (argc > 3) ?
        ((argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3]) : NULL;
    /* display the kill and clean up nicklists on every channel */
    for (ptr_channel = server->channels; ptr_channel;
         ptr_channel = ptr_channel->next_channel)
    {
        ptr_nick = irc_nick_search (server, ptr_channel, nick);
        ptr_nick_killed = irc_nick_search (server, ptr_channel, argv[2]);
        if (pos_comment)
        {
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (server, NULL, command, NULL,
                                                 ptr_channel->buffer),
                date,
                irc_protocol_tags (command, NULL, NULL, address),
                _("%s%sYou were killed by %s%s%s %s(%s%s%s)"),
                weechat_prefix ("quit"),
                IRC_COLOR_MESSAGE_KICK,
                irc_nick_color_for_msg (server, 1, ptr_nick, nick),
                nick,
                IRC_COLOR_MESSAGE_KICK,
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_REASON_KICK,
                pos_comment,
                IRC_COLOR_CHAT_DELIMITERS);
        }
        else
        {
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (server, NULL, command, NULL,
                                                 ptr_channel->buffer),
                date,
                irc_protocol_tags (command, NULL, NULL, address),
                _("%s%sYou were killed by %s%s%s"),
                weechat_prefix ("quit"),
                IRC_COLOR_MESSAGE_KICK,
                irc_nick_color_for_msg (server, 1, ptr_nick, nick),
                nick,
                IRC_COLOR_MESSAGE_KICK);
        }
        if (irc_server_strcasecmp (server, argv[2], server->nick) == 0)
        {
            /*
             * my nick was killed => free all nicks, channel is not active any
             * more
             */
            irc_nick_free_all (server, ptr_channel);
            irc_channel_modelist_set_state (ptr_channel,
                                            IRC_MODELIST_STATE_MODIFIED);
            irc_bar_item_update_channel ();
        }
        else
        {
            /*
             * someone was killed on channel (but not me) => remove only this
             * nick
             */
            if (ptr_nick_killed)
                irc_nick_free (server, ptr_channel, ptr_nick_killed);
        }
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "MODE".
*
* Message looks like:
* :nick!user@host MODE #test +o nick
* :nick!user@host MODE #test :+o :nick
*/
IRC_PROTOCOL_CALLBACK(mode)
{
    char *pos_modes, *pos_modes_args, *modes_args;
    int smart_filter, local_mode;
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;
    struct t_gui_buffer *ptr_buffer;
    IRC_PROTOCOL_MIN_ARGS(4);
    IRC_PROTOCOL_CHECK_HOST;
    /* mode string, optionally followed by mode arguments */
    pos_modes = (argv[3][0] == ':') ? argv[3] + 1 : argv[3];
    pos_modes_args = (argc > 4) ?
        ((argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]) : NULL;
    if (irc_channel_is_channel (server, argv[2]))
    {
        /* channel mode change */
        smart_filter = 0;
        ptr_channel = irc_channel_search (server, argv[2]);
        if (ptr_channel)
        {
            /* apply modes; return value decides smart filtering */
            smart_filter = irc_mode_channel_set (server, ptr_channel, host,
                                                 pos_modes, pos_modes_args);
        }
        local_mode = (irc_server_strcasecmp (server, nick, server->nick) == 0);
        ptr_nick = irc_nick_search (server, ptr_channel, nick);
        ptr_buffer = (ptr_channel) ? ptr_channel->buffer : server->buffer;
        /* caller owns the returned string: freed below */
        modes_args = irc_mode_get_arguments (pos_modes_args);
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (server, NULL, command, NULL,
                                             ptr_buffer),
            date,
            irc_protocol_tags (command,
                               (smart_filter && !local_mode) ?
                               "irc_smart_filter" : NULL,
                               NULL, address),
            _("%sMode %s%s %s[%s%s%s%s%s]%s by %s%s"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_CHANNEL,
            (ptr_channel) ? ptr_channel->name : argv[2],
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            pos_modes,
            (modes_args && modes_args[0]) ? " " : "",
            (modes_args && modes_args[0]) ? modes_args : "",
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            irc_nick_color_for_msg (server, 1, ptr_nick, nick),
            nick);
        if (modes_args)
            free (modes_args);
    }
    else
    {
        /* user mode change (target is not a channel) */
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (server, NULL, command, NULL, NULL),
            date,
            irc_protocol_tags (command, NULL, NULL, address),
            _("%sUser mode %s[%s%s%s]%s by %s%s"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            pos_modes,
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            irc_nick_color_for_msg (server, 1, NULL, nick),
            nick);
        irc_mode_user_set (server, pos_modes, 0);
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "NICK".
*
* Message looks like:
* :oldnick!user@host NICK :newnick
*/
IRC_PROTOCOL_CALLBACK(nick)
{
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick, *ptr_nick_found;
    char *new_nick, *old_color, str_tags[512];
    const char *buffer_name;
    int local_nick, smart_filter;
    struct t_irc_channel_speaking *ptr_nick_speaking;
    IRC_PROTOCOL_MIN_ARGS(3);
    IRC_PROTOCOL_CHECK_HOST;
    new_nick = (argv[2][0] == ':') ? argv[2] + 1 : argv[2];
    local_nick = (irc_server_strcasecmp (server, nick, server->nick) == 0) ? 1 : 0;
    if (local_nick)
    {
        /* our own nick changed: update server state */
        irc_server_set_nick (server, new_nick);
        irc_server_set_host (server, address);
    }
    ptr_nick_found = NULL;
    /* first display message in server buffer if it's local nick */
    if (local_nick)
    {
        /* temporary disable hotlist */
        weechat_buffer_set (NULL, "hotlist", "-");
        snprintf (str_tags, sizeof (str_tags),
                  "irc_nick1_%s,irc_nick2_%s",
                  nick,
                  new_nick);
        weechat_printf_date_tags (
            server->buffer,
            date,
            irc_protocol_tags (command, str_tags, NULL, address),
            _("%sYou are now known as %s%s%s"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_NICK_SELF,
            new_nick,
            IRC_COLOR_RESET);
        /* enable hotlist */
        weechat_buffer_set (NULL, "hotlist", "+");
    }
    /* propagate the rename to every channel/private buffer */
    for (ptr_channel = server->channels; ptr_channel;
         ptr_channel = ptr_channel->next_channel)
    {
        switch (ptr_channel->type)
        {
            case IRC_CHANNEL_TYPE_PRIVATE:
                /* rename private window if this is with "old nick" */
                if ((irc_server_strcasecmp (server, ptr_channel->name, nick) == 0)
                    && !irc_channel_search (server, new_nick))
                {
                    free (ptr_channel->name);
                    ptr_channel->name = strdup (new_nick);
                    if (ptr_channel->pv_remote_nick_color)
                    {
                        free (ptr_channel->pv_remote_nick_color);
                        ptr_channel->pv_remote_nick_color = NULL;
                    }
                    buffer_name = irc_buffer_build_name (server->name,
                                                         ptr_channel->name);
                    weechat_buffer_set (ptr_channel->buffer,
                                        "name", buffer_name);
                    weechat_buffer_set (ptr_channel->buffer,
                                        "short_name", ptr_channel->name);
                    weechat_buffer_set (ptr_channel->buffer,
                                        "localvar_set_channel",
                                        ptr_channel->name);
                }
                break;
            case IRC_CHANNEL_TYPE_CHANNEL:
                /* rename nick in nicklist if found */
                ptr_nick = irc_nick_search (server, ptr_channel, nick);
                if (ptr_nick)
                {
                    ptr_nick_found = ptr_nick;
                    /* set host in nick if needed */
                    irc_nick_set_host (ptr_nick, address);
                    /* change nick and display message on channel */
                    old_color = strdup (ptr_nick->color);
                    irc_nick_change (server, ptr_channel, ptr_nick, new_nick);
                    if (local_nick)
                    {
                        /* temporary disable hotlist */
                        weechat_buffer_set (NULL, "hotlist", "-");
                        snprintf (str_tags, sizeof (str_tags),
                                  "irc_nick1_%s,irc_nick2_%s",
                                  nick,
                                  new_nick);
                        weechat_printf_date_tags (ptr_channel->buffer,
                                                  date,
                                                  irc_protocol_tags (command,
                                                                     str_tags,
                                                                     NULL,
                                                                     address),
                                                  _("%sYou are now known as "
                                                    "%s%s%s"),
                                                  weechat_prefix ("network"),
                                                  IRC_COLOR_CHAT_NICK_SELF,
                                                  new_nick,
                                                  IRC_COLOR_RESET);
                        /* enable hotlist */
                        weechat_buffer_set (NULL, "hotlist", "+");
                    }
                    else
                    {
                        if (!irc_ignore_check (server, ptr_channel->name,
                                               nick, host))
                        {
                            /* smart filter the nick change like a join */
                            ptr_nick_speaking = ((weechat_config_boolean (irc_config_look_smart_filter))
                                                 && (weechat_config_boolean (irc_config_look_smart_filter_nick))) ?
                                irc_channel_nick_speaking_time_search (server, ptr_channel, nick, 1) : NULL;
                            smart_filter = (weechat_config_boolean (irc_config_look_smart_filter)
                                            && weechat_config_boolean (irc_config_look_smart_filter_nick)
                                            && !ptr_nick_speaking);
                            snprintf (str_tags, sizeof (str_tags),
                                      "%sirc_nick1_%s,irc_nick2_%s",
                                      (smart_filter) ? "irc_smart_filter," : "",
                                      nick,
                                      new_nick);
                            weechat_printf_date_tags (
                                ptr_channel->buffer,
                                date,
                                irc_protocol_tags (command, str_tags, NULL,
                                                   address),
                                _("%s%s%s%s is now known as %s%s%s"),
                                weechat_prefix ("network"),
                                weechat_config_boolean (irc_config_look_color_nicks_in_server_messages) ?
                                old_color : IRC_COLOR_CHAT_NICK,
                                nick,
                                IRC_COLOR_RESET,
                                irc_nick_color_for_msg (server, 1, ptr_nick,
                                                        new_nick),
                                new_nick,
                                IRC_COLOR_RESET);
                        }
                        /* keep speaking/smart-filter state tied to new nick */
                        irc_channel_nick_speaking_rename (ptr_channel,
                                                          nick, new_nick);
                        irc_channel_nick_speaking_time_rename (server,
                                                               ptr_channel,
                                                               nick, new_nick);
                        irc_channel_join_smart_filtered_rename (ptr_channel,
                                                                nick,
                                                                new_nick);
                    }
                    if (old_color)
                        free (old_color);
                }
                break;
        }
    }
    if (!local_nick)
    {
        irc_channel_display_nick_back_in_pv (server, ptr_nick_found, new_nick);
        irc_channel_set_topic_private_buffers (server, ptr_nick_found,
                                               new_nick, address);
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "NOTICE".
*
* Message looks like:
* NOTICE AUTH :*** Looking up your hostname...
* :nick!user@host NOTICE mynick :notice text
* :nick!user@host NOTICE #channel :notice text
*/
IRC_PROTOCOL_CALLBACK(notice)
{
    char *pos_target, *pos_args, *pos, end_char, *channel, status_notice[2];
    const char *nick_address;
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;
    int notify_private, is_channel, is_channel_orig;
    struct t_gui_buffer *ptr_buffer;
    IRC_PROTOCOL_MIN_ARGS(3);
    if (ignored)
        return WEECHAT_RC_OK;
    status_notice[0] = '\0';
    status_notice[1] = '\0';
    if (argv[0][0] == ':')
    {
        /* notice with a prefix: ":nick!user@host NOTICE target :text" */
        if (argc < 4)
            return WEECHAT_RC_ERROR;
        pos_target = argv[2];
        /* STATUSMSG prefix (e.g. "@#channel"): remember and skip it */
        is_channel = irc_channel_is_channel (server, pos_target + 1);
        if (is_channel
            && irc_server_prefix_char_statusmsg (server, pos_target[0]))
        {
            status_notice[0] = pos_target[0];
            pos_target++;
        }
        pos_args = (argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3];
        /* some servers repeat the statusmsg char at start of the text */
        if ((status_notice[0])
            && (pos_args[0] == status_notice[0]) && (pos_args[1] == ' '))
        {
            pos_args += 2;
        }
    }
    else
    {
        /* notice without prefix: "NOTICE AUTH :text" */
        pos_target = NULL;
        pos_args = (argv_eol[2][0] == ':') ? argv_eol[2] + 1 : argv_eol[2];
    }
    if (nick && (pos_args[0] == '\01'))
    {
        /* CTCP reply (text starts with \01) */
        irc_ctcp_display_reply_from_nick (server, date, command, nick, address,
                                          pos_args);
    }
    else
    {
        is_channel = 0;
        is_channel_orig = 0;
        channel = NULL;
        if (pos_target)
        {
            is_channel = irc_channel_is_channel (server, pos_target);
            is_channel_orig = is_channel;
            if (is_channel)
            {
                channel = strdup (pos_target);
            }
            else if (weechat_config_boolean (irc_config_look_notice_welcome_redirect))
            {
                /*
                 * "welcome notice" redirect: text like "[#channel] msg"
                 * is displayed in that channel buffer if it exists
                 */
                end_char = ' ';
                switch (pos_args[0])
                {
                    case '[':
                        end_char = ']';
                        break;
                    case '(':
                        end_char = ')';
                        break;
                    case '{':
                        end_char = '}';
                        break;
                    case '<':
                        end_char = '>';
                        break;
                }
                if (end_char != ' ')
                {
                    pos = strchr (pos_args, end_char);
                    if (pos && (pos > pos_args + 1))
                    {
                        channel = weechat_strndup (pos_args + 1,
                                                   pos - pos_args - 1);
                        if (channel && irc_channel_search (server, channel))
                        {
                            is_channel = 1;
                            pos_args = pos + 1;
                            while (pos_args[0] == ' ')
                            {
                                pos_args++;
                            }
                        }
                    }
                }
            }
        }
        if (is_channel)
        {
            /* notice for channel */
            ptr_channel = irc_channel_search (server, channel);
            /*
             * unmask a smart filtered join if it is in hashtable
             * "join_smart_filtered" of channel
             */
            if (ptr_channel)
                irc_channel_join_smart_filtered_unmask (ptr_channel, nick);
            ptr_nick = irc_nick_search (server, ptr_channel, nick);
            weechat_printf_date_tags (
                (ptr_channel) ? ptr_channel->buffer : server->buffer,
                date,
                irc_protocol_tags (command,
                                   (is_channel_orig) ?
                                   "notify_message" :
                                   weechat_config_string (irc_config_look_notice_welcome_tags),
                                   nick, address),
                "%s%s%s%s%s%s%s(%s%s%s)%s: %s",
                weechat_prefix ("network"),
                IRC_COLOR_NOTICE,
                (is_channel_orig) ? "" : "Pv",
                /* TRANSLATORS: "Notice" is command name in IRC protocol (translation is frequently the same word) */
                _("Notice"),
                (status_notice[0]) ? ":" : "",
                status_notice,
                IRC_COLOR_CHAT_DELIMITERS,
                irc_nick_color_for_msg (server, 0, ptr_nick, nick),
                (nick && nick[0]) ? nick : "?",
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_RESET,
                pos_args);
        }
        else
        {
            /* notice for user */
            notify_private = 0;
            if (server->is_connected
                && nick
                && (weechat_strcasecmp (nick, "nickserv") != 0)
                && (weechat_strcasecmp (nick, "chanserv") != 0)
                && (weechat_strcasecmp (nick, "memoserv") != 0))
            {
                /*
                 * add tag "notify_private" only if:
                 *   - server is connected (message 001 already received)
                 * and:
                 *   - notice is from a non-empty nick different from
                 *     nickserv/chanserv/memoserv
                 */
                notify_private = 1;
            }
            ptr_channel = NULL;
            if (nick
                && weechat_config_integer (irc_config_look_notice_as_pv) != IRC_CONFIG_LOOK_NOTICE_AS_PV_NEVER)
            {
                ptr_channel = irc_channel_search (server, nick);
                if (!ptr_channel
                    && weechat_config_integer (irc_config_look_notice_as_pv) == IRC_CONFIG_LOOK_NOTICE_AS_PV_ALWAYS)
                {
                    /* option forces a private buffer: create it on the fly */
                    ptr_channel = irc_channel_new (server,
                                                   IRC_CHANNEL_TYPE_PRIVATE,
                                                   nick, 0, 0);
                    if (!ptr_channel)
                    {
                        weechat_printf (server->buffer,
                                        _("%s%s: cannot create new "
                                          "private buffer \"%s\""),
                                        weechat_prefix ("error"),
                                        IRC_PLUGIN_NAME, nick);
                    }
                }
            }
            if (ptr_channel)
            {
                if (!ptr_channel->topic)
                    irc_channel_set_topic (ptr_channel, address);
                weechat_printf_date_tags (
                    ptr_channel->buffer,
                    date,
                    irc_protocol_tags (command, "notify_private", nick,
                                       address),
                    "%s%s%s%s: %s",
                    weechat_prefix ("network"),
                    irc_nick_color_for_msg (server, 0, NULL, nick),
                    nick,
                    IRC_COLOR_RESET,
                    pos_args);
                if ((ptr_channel->type == IRC_CHANNEL_TYPE_PRIVATE)
                    && ptr_channel->has_quit_server)
                {
                    ptr_channel->has_quit_server = 0;
                }
            }
            else
            {
                ptr_buffer = irc_msgbuffer_get_target_buffer (server, nick,
                                                              command, NULL,
                                                              NULL);
                /*
                 * if notice is sent from myself (for example another WeeChat
                 * via relay), then display message of outgoing notice
                 */
                if (nick && (irc_server_strcasecmp (server, server->nick, nick) == 0))
                {
                    weechat_printf_date_tags (
                        ptr_buffer,
                        date,
                        irc_protocol_tags (command,
                                           (notify_private) ? "notify_private" : NULL,
                                           server->nick, address),
                        "%s%s%s%s -> %s%s%s: %s",
                        weechat_prefix ("network"),
                        IRC_COLOR_NOTICE,
                        /* TRANSLATORS: "Notice" is command name in IRC protocol (translation is frequently the same word) */
                        _("Notice"),
                        IRC_COLOR_RESET,
                        irc_nick_color_for_msg (server, 0, NULL, pos_target),
                        pos_target,
                        IRC_COLOR_RESET,
                        pos_args);
                }
                else
                {
                    nick_address = irc_protocol_nick_address (server, 0, NULL,
                                                              nick, address);
                    weechat_printf_date_tags (
                        ptr_buffer,
                        date,
                        irc_protocol_tags (command,
                                           (notify_private) ? "notify_private" : NULL,
                                           nick, address),
                        "%s%s%s%s",
                        weechat_prefix ("network"),
                        nick_address,
                        (nick_address[0]) ? ": " : "",
                        pos_args);
                }
            }
        }
        if (channel)
            free (channel);
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "PART".
*
* Message looks like:
* :nick!user@host PART #channel :part message
*
* On undernet server, it can be:
* :nick!user@host PART :#channel
* :nick!user@host PART #channel :part message
*/
IRC_PROTOCOL_CALLBACK(part)
{
    char *pos_comment, *join_string;
    int join_length, local_part, display_host;
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;
    struct t_irc_channel_speaking *ptr_nick_speaking;
    IRC_PROTOCOL_MIN_ARGS(3);
    IRC_PROTOCOL_CHECK_HOST;
    /* optional part message (trailing parameter) */
    pos_comment = (argc > 3) ?
        ((argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3]) : NULL;
    ptr_channel = irc_channel_search (server,
                                      (argv[2][0] == ':') ? argv[2] + 1 : argv[2]);
    if (!ptr_channel)
        return WEECHAT_RC_OK;
    ptr_nick = irc_nick_search (server, ptr_channel, nick);
    local_part = (irc_server_strcasecmp (server, nick, server->nick) == 0);
    /* display part message */
    if (!ignored)
    {
        ptr_nick_speaking = NULL;
        if (ptr_channel->type == IRC_CHANNEL_TYPE_CHANNEL)
        {
            /* lookup recent speaking time only if smart filtering could apply */
            ptr_nick_speaking = ((weechat_config_boolean (irc_config_look_smart_filter))
                                 && (weechat_config_boolean (irc_config_look_smart_filter_quit))) ?
                irc_channel_nick_speaking_time_search (server, ptr_channel, nick, 1) : NULL;
        }
        display_host = weechat_config_boolean (irc_config_look_display_host_quit);
        if (pos_comment)
        {
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, NULL, ptr_channel->buffer),
                date,
                irc_protocol_tags (command,
                                   (local_part
                                    || (ptr_channel->type != IRC_CHANNEL_TYPE_CHANNEL)
                                    || !weechat_config_boolean (irc_config_look_smart_filter)
                                    || !weechat_config_boolean (irc_config_look_smart_filter_quit)
                                    || ptr_nick_speaking) ?
                                   NULL : "irc_smart_filter",
                                   nick, address),
                _("%s%s%s%s%s%s%s%s%s%s has left %s%s%s %s(%s%s%s)"),
                weechat_prefix ("quit"),
                irc_nick_color_for_msg (server, 1, ptr_nick, nick),
                nick,
                IRC_COLOR_CHAT_DELIMITERS,
                (display_host) ? " (" : "",
                IRC_COLOR_CHAT_HOST,
                (display_host) ? address : "",
                IRC_COLOR_CHAT_DELIMITERS,
                (display_host) ? ")" : "",
                IRC_COLOR_MESSAGE_QUIT,
                IRC_COLOR_CHAT_CHANNEL,
                ptr_channel->name,
                IRC_COLOR_MESSAGE_QUIT,
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_REASON_QUIT,
                pos_comment,
                IRC_COLOR_CHAT_DELIMITERS);
        }
        else
        {
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, NULL, ptr_channel->buffer),
                date,
                irc_protocol_tags (command,
                                   (local_part
                                    || (ptr_channel->type != IRC_CHANNEL_TYPE_CHANNEL)
                                    || !weechat_config_boolean (irc_config_look_smart_filter)
                                    || !weechat_config_boolean (irc_config_look_smart_filter_quit)
                                    || ptr_nick_speaking) ?
                                   NULL : "irc_smart_filter",
                                   nick, address),
                _("%s%s%s%s%s%s%s%s%s%s has left %s%s%s"),
                weechat_prefix ("quit"),
                irc_nick_color_for_msg (server, 1, ptr_nick, nick),
                nick,
                IRC_COLOR_CHAT_DELIMITERS,
                (display_host) ? " (" : "",
                IRC_COLOR_CHAT_HOST,
                (display_host) ? address : "",
                IRC_COLOR_CHAT_DELIMITERS,
                (display_host) ? ")" : "",
                IRC_COLOR_MESSAGE_QUIT,
                IRC_COLOR_CHAT_CHANNEL,
                ptr_channel->name,
                IRC_COLOR_MESSAGE_QUIT);
        }
    }
    /* part request was issued by local client ? */
    if (local_part)
    {
        irc_nick_free_all (server, ptr_channel);
        irc_channel_modelist_set_state (ptr_channel,
                                        IRC_MODELIST_STATE_MODIFIED);
        /* cycling ? => rejoin channel immediately */
        if (ptr_channel->cycle)
        {
            ptr_channel->cycle = 0;
            if (ptr_channel->key)
            {
                /* rejoin with the channel key: "name key" */
                join_length = strlen (ptr_channel->name) + 1 +
                    strlen (ptr_channel->key) + 1;
                join_string = malloc (join_length);
                if (join_string)
                {
                    snprintf (join_string, join_length, "%s %s",
                              ptr_channel->name,
                              ptr_channel->key);
                    irc_command_join_server (server, join_string, 1, 1);
                    free (join_string);
                }
                else
                    irc_command_join_server (server, ptr_channel->name,
                                             1, 1);
            }
            else
                irc_command_join_server (server, ptr_channel->name,
                                         1, 1);
        }
        else
        {
            if (weechat_config_boolean (irc_config_look_part_closes_buffer))
                weechat_buffer_close (ptr_channel->buffer);
            else
                ptr_channel->part = 1;
        }
        irc_bar_item_update_channel ();
    }
    else if (ptr_nick)
    {
        /* part from another user */
        irc_channel_join_smart_filtered_remove (ptr_channel,
                                                ptr_nick->name);
        irc_nick_free (server, ptr_channel, ptr_nick);
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "PING".
*
* Message looks like:
* PING :arguments
*/
IRC_PROTOCOL_CALLBACK(ping)
{
    const char *ptr_ping_args;

    IRC_PROTOCOL_MIN_ARGS(2);

    /* echo the ping arguments back in a PONG (without the leading ':') */
    ptr_ping_args = argv_eol[1];
    if (ptr_ping_args[0] == ':')
        ptr_ping_args++;

    irc_server_sendf (server, 0, NULL, "PONG :%s", ptr_ping_args);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "PONG".
*
* Message looks like:
* :server PONG server :arguments
*/
IRC_PROTOCOL_CALLBACK(pong)
{
    struct timeval tv;
    IRC_PROTOCOL_MIN_ARGS(0);
    /* a pending lag check? this PONG is its answer */
    if (server->lag_check_time.tv_sec != 0)
    {
        /* calculate lag (time diff with lag check) */
        gettimeofday (&tv, NULL);
        server->lag = (int)(weechat_util_timeval_diff (&(server->lag_check_time),
                                                       &tv) / 1000);
        /* schedule next lag check */
        server->lag_check_time.tv_sec = 0;
        server->lag_check_time.tv_usec = 0;
        server->lag_next_check = time (NULL) +
            weechat_config_integer (irc_config_network_lag_check);
        /* refresh lag bar item if needed */
        if (server->lag != server->lag_displayed)
        {
            server->lag_displayed = server->lag;
            irc_server_set_lag (server);
        }
    }
    else
    {
        /* unsolicited PONG (e.g. manual /ping): just display it */
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (server, NULL, command, NULL, NULL),
            date,
            irc_protocol_tags (command, NULL, NULL, NULL),
            "PONG%s%s",
            (argc >= 4) ? ": " : "",
            (argc >= 4) ? ((argv_eol[3][0] == ':') ?
                           argv_eol[3] + 1 : argv_eol[3]) : "");
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "PRIVMSG".
*
* Message looks like:
* :nick!user@host PRIVMSG #channel :message for channel here
* :nick!user@host PRIVMSG mynick :message for private here
* :nick!user@host PRIVMSG #channel :\01ACTION is testing action\01
* :nick!user@host PRIVMSG mynick :\01ACTION is testing action\01
* :nick!user@host PRIVMSG #channel :\01VERSION\01
* :nick!user@host PRIVMSG mynick :\01VERSION\01
* :nick!user@host PRIVMSG mynick :\01DCC SEND file.txt 1488915698 50612 128\01
*/
IRC_PROTOCOL_CALLBACK(privmsg)
{
    char *pos_args, *pos_target, str_tags[1024], *str_color, status_msg[2];
    char *color;
    const char *remote_nick, *pv_tags;
    int is_channel, nick_is_me;
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;

    IRC_PROTOCOL_MIN_ARGS(4);
    IRC_PROTOCOL_CHECK_HOST;

    if (ignored)
        return WEECHAT_RC_OK;

    /* message text: strip the leading ':' of the trailing parameter */
    pos_args = (argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3];

    /* status_msg holds a STATUSMSG prefix char (e.g. '@', '+'), if any */
    status_msg[0] = '\0';
    status_msg[1] = '\0';
    pos_target = argv[2];
    is_channel = irc_channel_is_channel (server, pos_target);
    if (!is_channel)
    {
        /* target like "@#channel": message to channel ops/voiced */
        if (irc_channel_is_channel (server, pos_target + 1)
            && irc_server_prefix_char_statusmsg (server, pos_target[0]))
        {
            is_channel = 1;
            status_msg[0] = pos_target[0];
            pos_target++;
        }
    }

    /* receiver is a channel ? */
    if (is_channel)
    {
        ptr_channel = irc_channel_search (server, pos_target);
        if (ptr_channel)
        {
            /*
             * unmask a smart filtered join if it is in hashtable
             * "join_smart_filtered" of channel
             */
            irc_channel_join_smart_filtered_unmask (ptr_channel, nick);

            /* CTCP to channel (message starts with \01) */
            if (pos_args[0] == '\01')
            {
                irc_ctcp_recv (server, date, command, ptr_channel,
                               address, nick, NULL, pos_args, argv_eol[0]);
                return WEECHAT_RC_OK;
            }

            /* other message */
            ptr_nick = irc_nick_search (server, ptr_channel, nick);
            if (ptr_nick)
                irc_nick_set_host (ptr_nick, address);

            if (status_msg[0])
            {
                /* message to channel ops/voiced (to "@#channel" or "+#channel") */
                weechat_printf_date_tags (
                    ptr_channel->buffer,
                    date,
                    irc_protocol_tags (command, "notify_message", nick,
                                       address),
                    "%s%s%s%s%s(%s%s%s)%s: %s",
                    weechat_prefix ("network"),
                    "Msg",
                    (status_msg[0]) ? ":" : "",
                    status_msg,
                    IRC_COLOR_CHAT_DELIMITERS,
                    irc_nick_color_for_msg (server, 0, ptr_nick, nick),
                    (nick && nick[0]) ? nick : "?",
                    IRC_COLOR_CHAT_DELIMITERS,
                    IRC_COLOR_RESET,
                    pos_args);
            }
            else
            {
                /* standard message (to "#channel") */
                color = irc_nick_find_color_name ((ptr_nick) ? ptr_nick->name : nick);
                str_color = irc_color_for_tags (color);
                if (color)
                    free (color);
                snprintf (str_tags, sizeof (str_tags),
                          "notify_message,prefix_nick_%s",
                          (str_color) ? str_color : "default");
                if (str_color)
                    free (str_color);
                weechat_printf_date_tags (
                    ptr_channel->buffer,
                    date,
                    irc_protocol_tags (command, str_tags, nick, address),
                    "%s%s",
                    irc_nick_as_prefix (server, ptr_nick,
                                        (ptr_nick) ? NULL : nick,
                                        NULL),
                    pos_args);
            }

            /* record speaking activity for this nick on the channel */
            irc_channel_nick_speaking_add (
                ptr_channel,
                nick,
                weechat_string_has_highlight (pos_args,
                                              server->nick));
            irc_channel_nick_speaking_time_remove_old (ptr_channel);
            irc_channel_nick_speaking_time_add (server, ptr_channel, nick,
                                                time (NULL));
        }
    }
    else
    {
        nick_is_me = (irc_server_strcasecmp (server, server->nick, nick) == 0);

        /* for an echoed self-message the pv buffer is named after the target */
        remote_nick = (nick_is_me) ? pos_target : nick;

        /* CTCP to user */
        if (pos_args[0] == '\01')
        {
            irc_ctcp_recv (server, date, command, NULL,
                           address, nick, remote_nick, pos_args, argv_eol[0]);
            return WEECHAT_RC_OK;
        }

        /* private message received => display it */
        ptr_channel = irc_channel_search (server, remote_nick);

        if (!ptr_channel)
        {
            ptr_channel = irc_channel_new (server,
                                           IRC_CHANNEL_TYPE_PRIVATE,
                                           remote_nick, 0, 0);
            if (!ptr_channel)
            {
                weechat_printf (server->buffer,
                                _("%s%s: cannot create new "
                                  "private buffer \"%s\""),
                                weechat_prefix ("error"),
                                IRC_PLUGIN_NAME, remote_nick);
                return WEECHAT_RC_ERROR;
            }
        }

        /* the pv buffer "topic" shows the remote user's address */
        irc_channel_set_topic (ptr_channel, address);

        /* pick the color used in the "prefix_nick_xxx" tag */
        if (nick_is_me)
        {
            str_color = irc_color_for_tags (
                weechat_config_color (
                    weechat_config_get ("weechat.color.chat_nick_self")));
        }
        else
        {
            if (weechat_config_boolean (irc_config_look_color_pv_nick_like_channel))
            {
                color = irc_nick_find_color_name (nick);
                str_color = irc_color_for_tags (color);
                if (color)
                    free (color);
            }
            else
            {
                str_color = irc_color_for_tags (
                    weechat_config_color (
                        weechat_config_get ("weechat.color.chat_nick_other")));
            }
        }

        /* build tags: self messages are not highlighted/notified */
        if (nick_is_me)
        {
            snprintf (str_tags, sizeof (str_tags),
                      "self_msg,notify_none,no_highlight,prefix_nick_%s",
                      (str_color) ? str_color : "default");
        }
        else
        {
            pv_tags = weechat_config_string (irc_config_look_pv_tags);
            snprintf (str_tags, sizeof (str_tags),
                      "%s%sprefix_nick_%s",
                      (pv_tags && pv_tags[0]) ? pv_tags : "",
                      (pv_tags && pv_tags[0]) ? "," : "",
                      (str_color) ? str_color : "default");
        }
        if (str_color)
            free (str_color);

        weechat_printf_date_tags (
            ptr_channel->buffer,
            date,
            irc_protocol_tags (command, str_tags, nick, address),
            "%s%s",
            irc_nick_as_prefix (
                server, NULL, nick,
                (nick_is_me) ?
                IRC_COLOR_CHAT_NICK_SELF : irc_nick_color_for_pv (ptr_channel, nick)),
            pos_args);

        /* the remote user is obviously back: clear the quit flag */
        if (ptr_channel->has_quit_server)
            ptr_channel->has_quit_server = 0;

        (void) weechat_hook_signal_send ("irc_pv",
                                         WEECHAT_HOOK_SIGNAL_STRING,
                                         argv_eol[0]);
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "QUIT".
*
* Message looks like:
* :nick!user@host QUIT :quit message
*/
IRC_PROTOCOL_CALLBACK(quit)
{
    char *pos_comment;
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;
    struct t_irc_channel_speaking *ptr_nick_speaking;
    int local_quit, display_host;

    IRC_PROTOCOL_MIN_ARGS(2);
    IRC_PROTOCOL_CHECK_HOST;

    /* optional quit message (trailing parameter) */
    pos_comment = (argc > 2) ?
        ((argv_eol[2][0] == ':') ? argv_eol[2] + 1 : argv_eol[2]) : NULL;

    /* the quitting nick may be present on several channels / pv buffers */
    for (ptr_channel = server->channels; ptr_channel;
         ptr_channel = ptr_channel->next_channel)
    {
        if (ptr_channel->type == IRC_CHANNEL_TYPE_PRIVATE)
            ptr_nick = NULL;
        else
            ptr_nick = irc_nick_search (server, ptr_channel, nick);

        /* match: nick is on the channel, or a pv buffer carries its name */
        if (ptr_nick
            || (irc_server_strcasecmp (server, ptr_channel->name, nick) == 0))
        {
            local_quit = (irc_server_strcasecmp (server, nick, server->nick) == 0);
            if (!irc_ignore_check (server, ptr_channel->name, nick, host))
            {
                /* display quit message */
                ptr_nick_speaking = NULL;
                if (ptr_channel->type == IRC_CHANNEL_TYPE_CHANNEL)
                {
                    /*
                     * smart filter: keep the quit visible only when the
                     * nick spoke recently (both options must be enabled)
                     */
                    ptr_nick_speaking = ((weechat_config_boolean (irc_config_look_smart_filter))
                                         && (weechat_config_boolean (irc_config_look_smart_filter_quit))) ?
                        irc_channel_nick_speaking_time_search (server, ptr_channel, nick, 1) : NULL;
                }
                if (ptr_channel->type == IRC_CHANNEL_TYPE_PRIVATE)
                {
                    /* remember the quit, so a later message clears it */
                    ptr_channel->has_quit_server = 1;
                }
                display_host = weechat_config_boolean (irc_config_look_display_host_quit);
                if (pos_comment && pos_comment[0])
                {
                    /* quit with a reason */
                    weechat_printf_date_tags (
                        irc_msgbuffer_get_target_buffer (
                            server, NULL, command, NULL, ptr_channel->buffer),
                        date,
                        irc_protocol_tags (
                            command,
                            (local_quit
                             || (ptr_channel->type != IRC_CHANNEL_TYPE_CHANNEL)
                             || !weechat_config_boolean (irc_config_look_smart_filter)
                             || !weechat_config_boolean (irc_config_look_smart_filter_quit)
                             || ptr_nick_speaking) ?
                            NULL : "irc_smart_filter",
                            nick, address),
                        _("%s%s%s%s%s%s%s%s%s%s has quit %s(%s%s%s)"),
                        weechat_prefix ("quit"),
                        (ptr_channel->type == IRC_CHANNEL_TYPE_PRIVATE) ?
                        irc_nick_color_for_pv (ptr_channel, nick) : irc_nick_color_for_msg (server, 1, ptr_nick, nick),
                        nick,
                        IRC_COLOR_CHAT_DELIMITERS,
                        (display_host) ? " (" : "",
                        IRC_COLOR_CHAT_HOST,
                        (display_host) ? address : "",
                        IRC_COLOR_CHAT_DELIMITERS,
                        (display_host) ? ")" : "",
                        IRC_COLOR_MESSAGE_QUIT,
                        IRC_COLOR_CHAT_DELIMITERS,
                        IRC_COLOR_REASON_QUIT,
                        pos_comment,
                        IRC_COLOR_CHAT_DELIMITERS);
                }
                else
                {
                    /* quit without a reason */
                    weechat_printf_date_tags (
                        irc_msgbuffer_get_target_buffer (
                            server, NULL, command, NULL, ptr_channel->buffer),
                        date,
                        irc_protocol_tags (
                            command,
                            (local_quit
                             || (ptr_channel->type != IRC_CHANNEL_TYPE_CHANNEL)
                             || !weechat_config_boolean (irc_config_look_smart_filter)
                             || !weechat_config_boolean (irc_config_look_smart_filter_quit)
                             || ptr_nick_speaking) ?
                            NULL : "irc_smart_filter",
                            nick, address),
                        _("%s%s%s%s%s%s%s%s%s%s has quit"),
                        weechat_prefix ("quit"),
                        (ptr_channel->type == IRC_CHANNEL_TYPE_PRIVATE) ?
                        irc_nick_color_for_pv (ptr_channel, nick) : irc_nick_color_for_msg (server, 1, ptr_nick, nick),
                        nick,
                        IRC_COLOR_CHAT_DELIMITERS,
                        (display_host) ? " (" : "",
                        IRC_COLOR_CHAT_HOST,
                        (display_host) ? address : "",
                        IRC_COLOR_CHAT_DELIMITERS,
                        (display_host) ? ")" : "",
                        IRC_COLOR_MESSAGE_QUIT);
                }
            }
            if (!local_quit && ptr_nick)
            {
                /* drop any pending smart-filtered join for this nick */
                irc_channel_join_smart_filtered_remove (ptr_channel,
                                                        ptr_nick->name);
            }
            /* remove the nick from the channel nicklist (after display) */
            if (ptr_nick)
                irc_nick_free (server, ptr_channel, ptr_nick);
        }
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for an IRC message with mode and reason (numeric).
*/
/*
 * Displays a numeric carrying a mode and an optional reason.
 *
 * Fix: with IRC_PROTOCOL_MIN_ARGS(3), a message ending right after our own
 * nick (argc == 3 and argv[2] == server->nick) made the original code read
 * argv[3], which is past the last valid argument (NULL for a NUL-terminated
 * split array) and was then passed to "%s" — undefined behavior.  Both
 * pos_mode and pos_args are now guarded and printed null-safely.
 */
IRC_PROTOCOL_CALLBACK(server_mode_reason)
{
    char *pos_mode, *pos_args;

    IRC_PROTOCOL_MIN_ARGS(3);

    /* skip nickname if at beginning of server message */
    if (irc_server_strcasecmp (server, server->nick, argv[2]) == 0)
    {
        pos_mode = (argc > 3) ? argv[3] : NULL;
        pos_args = (argc > 4) ?
            ((argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]) : NULL;
    }
    else
    {
        pos_mode = argv[2];
        pos_args = (argc > 3) ?
            ((argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3]) : NULL;
    }

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, NULL, command, NULL, NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s: %s",
        weechat_prefix ("network"),
        (pos_mode) ? pos_mode : "",
        (pos_args) ? pos_args : "");

    return WEECHAT_RC_OK;
}
/*
* Callback for a numeric IRC message.
*/
/*
 * Displays a generic numeric reply.
 *
 * Fix: when the numeric is addressed to us and carries no further arguments
 * (argc == 3), pos_args was NULL and was passed to a "%s" conversion —
 * undefined behavior.  Print an empty string in that case.
 */
IRC_PROTOCOL_CALLBACK(numeric)
{
    char *pos_args;

    IRC_PROTOCOL_MIN_ARGS(3);

    if (irc_server_strcasecmp (server, server->nick, argv[2]) == 0)
    {
        /* message is addressed to us: arguments start after our nick */
        pos_args = (argc > 3) ?
            ((argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3]) : NULL;
    }
    else
    {
        pos_args = (argv_eol[2][0] == ':') ? argv_eol[2] + 1 : argv_eol[2];
    }

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, NULL, command, NULL, NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s",
        weechat_prefix ("network"),
        (pos_args) ? pos_args : "");

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "TOPIC".
*
* Message looks like:
* :nick!user@host TOPIC #channel :new topic for channel
*/
IRC_PROTOCOL_CALLBACK(topic)
{
    char *pos_topic, *old_topic_color, *topic_color;
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;
    struct t_gui_buffer *ptr_buffer;

    IRC_PROTOCOL_MIN_ARGS(3);

    if (!irc_channel_is_channel (server, argv[2]))
    {
        weechat_printf (server->buffer,
                        _("%s%s: \"%s\" command received without channel"),
                        weechat_prefix ("error"), IRC_PLUGIN_NAME, "topic");
        return WEECHAT_RC_OK;
    }

    /* new topic is optional: missing/empty means "topic unset" */
    pos_topic = (argc > 3) ?
        ((argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3]) : NULL;

    ptr_channel = irc_channel_search (server, argv[2]);
    /*
     * NOTE(review): ptr_channel may be NULL here; assumes irc_nick_search
     * handles a NULL channel — confirm in irc-nick.c
     */
    ptr_nick = irc_nick_search (server, ptr_channel, nick);
    ptr_buffer = (ptr_channel) ? ptr_channel->buffer : server->buffer;

    /*
     * unmask a smart filtered join if it is in hashtable
     * "join_smart_filtered" of channel
     */
    if (ptr_channel)
        irc_channel_join_smart_filtered_unmask (ptr_channel, nick);

    if (pos_topic && pos_topic[0])
    {
        /* topic set or changed */
        topic_color = irc_color_decode (
            pos_topic,
            weechat_config_boolean (irc_config_network_colors_receive));
        if (weechat_config_boolean (irc_config_look_display_old_topic)
            && ptr_channel && ptr_channel->topic && ptr_channel->topic[0])
        {
            /* display old and new topic */
            old_topic_color = irc_color_decode (
                ptr_channel->topic,
                weechat_config_boolean (irc_config_network_colors_receive));
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, NULL, ptr_buffer),
                date,
                irc_protocol_tags (command, NULL, NULL, address),
                _("%s%s%s%s has changed topic for %s%s%s from \"%s%s%s\" to "
                  "\"%s%s%s\""),
                weechat_prefix ("network"),
                irc_nick_color_for_msg (server, 1, ptr_nick, nick),
                nick,
                IRC_COLOR_RESET,
                IRC_COLOR_CHAT_CHANNEL,
                argv[2],
                IRC_COLOR_RESET,
                IRC_COLOR_TOPIC_OLD,
                (old_topic_color) ? old_topic_color : ptr_channel->topic,
                IRC_COLOR_RESET,
                IRC_COLOR_TOPIC_NEW,
                (topic_color) ? topic_color : pos_topic,
                IRC_COLOR_RESET);
            if (old_topic_color)
                free (old_topic_color);
        }
        else
        {
            /* display only the new topic */
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, NULL, ptr_buffer),
                date,
                irc_protocol_tags (command, NULL, NULL, address),
                _("%s%s%s%s has changed topic for %s%s%s to \"%s%s%s\""),
                weechat_prefix ("network"),
                irc_nick_color_for_msg (server, 1, ptr_nick, nick),
                nick,
                IRC_COLOR_RESET,
                IRC_COLOR_CHAT_CHANNEL,
                argv[2],
                IRC_COLOR_RESET,
                IRC_COLOR_TOPIC_NEW,
                (topic_color) ? topic_color : pos_topic,
                IRC_COLOR_RESET);
        }
        if (topic_color)
            free (topic_color);
    }
    else
    {
        /* topic unset */
        if (weechat_config_boolean (irc_config_look_display_old_topic)
            && ptr_channel && ptr_channel->topic && ptr_channel->topic[0])
        {
            /* display the old topic that has been removed */
            old_topic_color = irc_color_decode (
                ptr_channel->topic,
                weechat_config_boolean (irc_config_network_colors_receive));
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, NULL, ptr_buffer),
                date,
                irc_protocol_tags (command, NULL, NULL, address),
                _("%s%s%s%s has unset topic for %s%s%s (old topic: "
                  "\"%s%s%s\")"),
                weechat_prefix ("network"),
                irc_nick_color_for_msg (server, 1, ptr_nick, nick),
                nick,
                IRC_COLOR_RESET,
                IRC_COLOR_CHAT_CHANNEL,
                argv[2],
                IRC_COLOR_RESET,
                IRC_COLOR_TOPIC_OLD,
                (old_topic_color) ? old_topic_color : ptr_channel->topic,
                IRC_COLOR_RESET);
            if (old_topic_color)
                free (old_topic_color);
        }
        else
        {
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, NULL, ptr_buffer),
                date,
                irc_protocol_tags (command, NULL, NULL, address),
                _("%s%s%s%s has unset topic for %s%s%s"),
                weechat_prefix ("network"),
                irc_nick_color_for_msg (server, 1, ptr_nick, nick),
                nick,
                IRC_COLOR_RESET,
                IRC_COLOR_CHAT_CHANNEL,
                argv[2],
                IRC_COLOR_RESET);
        }
    }

    /* store the new topic (NULL unsets it) in the channel structure */
    if (ptr_channel)
        irc_channel_set_topic (ptr_channel, pos_topic);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "WALLOPS".
*
* Message looks like:
* :nick!user@host WALLOPS :message from admin
*/
IRC_PROTOCOL_CALLBACK(wallops)
{
    const char *ptr_nick_address, *ptr_message;

    IRC_PROTOCOL_MIN_ARGS(3);

    if (ignored)
        return WEECHAT_RC_OK;

    /* strip the leading ':' of the trailing parameter, if any */
    ptr_message = (argv_eol[2][0] == ':') ? argv_eol[2] + 1 : argv_eol[2];

    ptr_nick_address = irc_protocol_nick_address (server, 0, NULL, nick,
                                                  address);

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, nick, command, NULL, NULL),
        date,
        irc_protocol_tags (command, NULL, nick, address),
        _("%sWallops from %s: %s"),
        weechat_prefix ("network"),
        (ptr_nick_address[0]) ? ptr_nick_address : "?",
        ptr_message);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "001": connected to irc server.
*
* Message looks like:
* :server 001 mynick :Welcome to the dancer-ircd Network
*/
IRC_PROTOCOL_CALLBACK(001)
{
    char *server_command, **commands, **ptr_command, *command2, *slash_command;
    char *away_msg, *usermode;
    int length;

    IRC_PROTOCOL_MIN_ARGS(3);

    /* the server tells us our effective nick: adopt it if it differs */
    if (irc_server_strcasecmp (server, server->nick, argv[2]) != 0)
        irc_server_set_nick (server, argv[2]);

    /* display the welcome line like any other numeric */
    irc_protocol_cb_numeric (server,
                             date, nick, address, host, command,
                             ignored, argc, argv, argv_eol);

    /* connection to IRC server is OK! */
    server->is_connected = 1;
    server->reconnect_delay = 0;
    server->monitor_time = time (NULL) + 5;

    /* cancel the connection-timeout timer, if still armed */
    if (server->hook_timer_connection)
    {
        weechat_unhook (server->hook_timer_connection);
        server->hook_timer_connection = NULL;
    }
    server->lag_next_check = time (NULL) +
        weechat_config_integer (irc_config_network_lag_check);
    irc_server_set_buffer_title (server);

    /* set away message if user was away (before disconnection for example) */
    if (server->away_message && server->away_message[0])
    {
        /* copy first: irc_command_away_server may free/replace the field */
        away_msg = strdup (server->away_message);
        if (away_msg)
        {
            irc_command_away_server (server, away_msg, 0);
            free (away_msg);
        }
    }

    /* send signal "irc_server_connected" with server name */
    (void) weechat_hook_signal_send ("irc_server_connected",
                                     WEECHAT_HOOK_SIGNAL_STRING, server->name);

    /* set usermode when connected */
    usermode = irc_server_eval_expression (
        server,
        IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_USERMODE));
    if (usermode && usermode[0])
    {
        irc_server_sendf (server,
                          IRC_SERVER_SEND_OUTQ_PRIO_HIGH, NULL,
                          "MODE %s %s",
                          server->nick, usermode);
    }
    if (usermode)
        free (usermode);

    /* execute command when connected */
    server_command = irc_server_eval_expression (
        server,
        IRC_SERVER_OPTION_STRING(server, IRC_SERVER_OPTION_COMMAND));
    if (server_command && server_command[0])
    {
        /* split command on ';' which can be escaped with '\;' */
        commands = weechat_string_split_command (server_command, ';');
        if (commands)
        {
            for (ptr_command = commands; *ptr_command; ptr_command++)
            {
                command2 = irc_message_replace_vars (server, NULL,
                                                     *ptr_command);
                if (command2)
                {
                    if (weechat_string_is_command_char (command2))
                    {
                        weechat_command (server->buffer, command2);
                    }
                    else
                    {
                        /* not a command: prepend '/' then run it */
                        length = 1 + strlen(command2) + 1;
                        slash_command = malloc (length);
                        if (slash_command)
                        {
                            snprintf (slash_command, length, "/%s", command2);
                            weechat_command (server->buffer, slash_command);
                            free (slash_command);
                        }
                    }
                    free (command2);
                }
            }
            weechat_string_free_split_command (commands);
        }
        /* autojoin now, or delayed when a command delay is configured */
        if (IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_COMMAND_DELAY) > 0)
            server->command_time = time (NULL) + 1;
        else
            irc_server_autojoin_channels (server);
    }
    else
    {
        irc_server_autojoin_channels (server);
    }
    if (server_command)
        free (server_command);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "005": some infos from server.
*
* Message looks like:
* :server 005 mynick MODES=4 CHANLIMIT=#:20 NICKLEN=16 USERLEN=10
* HOSTLEN=63 TOPICLEN=450 KICKLEN=450 CHANNELLEN=30 KEYLEN=23
* CHANTYPES=# PREFIX=(ov)@+ CASEMAPPING=ascii CAPAB IRCD=dancer
* :are available on this server
*/
IRC_PROTOCOL_CALLBACK(005)
{
    char *pos, *pos2, *pos_start, *error, *isupport2;
    int length_isupport, length, casemapping;
    long value;

    IRC_PROTOCOL_MIN_ARGS(4);

    /* display the raw numeric like any other numeric message */
    irc_protocol_cb_numeric (server,
                             date, nick, address, host, command,
                             ignored, argc, argv, argv_eol);

    /*
     * each "TOKEN=value" below is parsed by temporarily writing a NUL over
     * the following space in argv_eol[3] (then restoring the space), so the
     * value can be used as a standalone string without copying
     */

    /* save prefix */
    pos = strstr (argv_eol[3], "PREFIX=");
    if (pos)
    {
        pos += 7;
        pos2 = strchr (pos, ' ');
        if (pos2)
            pos2[0] = '\0';
        irc_server_set_prefix_modes_chars (server, pos);
        if (pos2)
            pos2[0] = ' ';
    }

    /* save max nick length */
    pos = strstr (argv_eol[3], "NICKLEN=");
    if (pos)
    {
        pos += 8;
        pos2 = strchr (pos, ' ');
        if (pos2)
            pos2[0] = '\0';
        error = NULL;
        value = strtol (pos, &error, 10);
        /* accept the value only if fully numeric and positive */
        if (error && !error[0] && (value > 0))
            server->nick_max_length = (int)value;
        if (pos2)
            pos2[0] = ' ';
    }

    /* save max user length */
    pos = strstr (argv_eol[3], "USERLEN=");
    if (pos)
    {
        pos += 8;
        pos2 = strchr (pos, ' ');
        if (pos2)
            pos2[0] = '\0';
        error = NULL;
        value = strtol (pos, &error, 10);
        if (error && !error[0] && (value > 0))
            server->user_max_length = (int)value;
        if (pos2)
            pos2[0] = ' ';
    }

    /* save max host length */
    pos = strstr (argv_eol[3], "HOSTLEN=");
    if (pos)
    {
        pos += 8;
        pos2 = strchr (pos, ' ');
        if (pos2)
            pos2[0] = '\0';
        error = NULL;
        value = strtol (pos, &error, 10);
        if (error && !error[0] && (value > 0))
            server->host_max_length = (int)value;
        if (pos2)
            pos2[0] = ' ';
    }

    /* save casemapping */
    pos = strstr (argv_eol[3], "CASEMAPPING=");
    if (pos)
    {
        pos += 12;
        pos2 = strchr (pos, ' ');
        if (pos2)
            pos2[0] = '\0';
        casemapping = irc_server_search_casemapping (pos);
        if (casemapping >= 0)
            server->casemapping = casemapping;
        if (pos2)
            pos2[0] = ' ';
    }

    /* save chantypes */
    pos = strstr (argv_eol[3], "CHANTYPES=");
    if (pos)
    {
        pos += 10;
        pos2 = strchr (pos, ' ');
        if (pos2)
            pos2[0] = '\0';
        if (server->chantypes)
            free (server->chantypes);
        server->chantypes = strdup (pos);
        if (pos2)
            pos2[0] = ' ';
    }

    /* save chanmodes */
    pos = strstr (argv_eol[3], "CHANMODES=");
    if (pos)
    {
        pos += 10;
        pos2 = strchr (pos, ' ');
        if (pos2)
            pos2[0] = '\0';
        if (server->chanmodes)
            free (server->chanmodes);
        server->chanmodes = strdup (pos);
        if (pos2)
            pos2[0] = ' ';
    }

    /* save monitor (limit) */
    pos = strstr (argv_eol[3], "MONITOR=");
    if (pos)
    {
        pos += 8;
        pos2 = strchr (pos, ' ');
        if (pos2)
            pos2[0] = '\0';
        error = NULL;
        value = strtol (pos, &error, 10);
        if (error && !error[0] && (value > 0))
            server->monitor = (int)value;
        if (pos2)
            pos2[0] = ' ';
    }

    /* save whole message (concatenate to existing isupport, if any) */
    pos_start = NULL;
    /* keep only the tokens, not the trailing " :are supported..." text */
    pos = strstr (argv_eol[3], " :");
    length = (pos) ? pos - argv_eol[3] : (int)strlen (argv_eol[3]);
    if (server->isupport)
    {
        length_isupport = strlen (server->isupport);
        isupport2 = realloc (server->isupport,
                             length_isupport + /* existing */
                             1 + length + 1); /* new */
        if (isupport2)
        {
            server->isupport = isupport2;
            pos_start = server->isupport + length_isupport;
        }
    }
    else
    {
        server->isupport = malloc (1 + length + 1);
        if (server->isupport)
            pos_start = server->isupport;
    }
    if (pos_start)
    {
        /* append " " + tokens, NUL-terminated */
        pos_start[0] = ' ';
        memcpy (pos_start + 1, argv_eol[3], length);
        pos_start[length + 1] = '\0';
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "008": server notice mask.
*
* Message looks like:
* :server 008 nick +Zbfkrsuy :Server notice mask
*/
IRC_PROTOCOL_CALLBACK(008)
{
    const char *ptr_mask;

    IRC_PROTOCOL_MIN_ARGS(4);

    /* notice mask, without the leading ':' */
    ptr_mask = (argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, argv[2], command, NULL, NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, address),
        _("%sServer notice mask for %s%s%s: %s"),
        weechat_prefix ("network"),
        irc_nick_color_for_msg (server, 1, NULL, argv[2]),
        argv[2],
        IRC_COLOR_RESET,
        ptr_mask);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "221": user mode string.
*
* Message looks like:
* :server 221 nick :+s
*/
IRC_PROTOCOL_CALLBACK(221)
{
    const char *ptr_modes;

    IRC_PROTOCOL_MIN_ARGS(4);

    /* compute the mode string once instead of repeating the ternary */
    ptr_modes = (argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, argv[2], command, NULL, NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, address),
        _("%sUser mode for %s%s%s is %s[%s%s%s]"),
        weechat_prefix ("network"),
        irc_nick_color_for_msg (server, 1, NULL, argv[2]),
        argv[2],
        IRC_COLOR_RESET,
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        ptr_modes,
        IRC_COLOR_CHAT_DELIMITERS);

    /* if the modes are ours, apply them on the server structure */
    if (irc_server_strcasecmp (server, argv[2], server->nick) == 0)
        irc_mode_user_set (server, ptr_modes, 1);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "301": away message.
*
* Message is received when we are talking to a user in private and that remote
* user is away (we receive away message).
*
* Message looks like:
* :server 301 mynick nick :away message for nick
*/
IRC_PROTOCOL_CALLBACK(301)
{
    char *pos_away_msg;
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;

    IRC_PROTOCOL_MIN_ARGS(3);

    /* away message is only displayed when present (argc > 4) */
    if (argc > 4)
    {
        pos_away_msg = (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4];

        /* look for private buffer to display message */
        ptr_channel = irc_channel_search (server, argv[3]);

        /*
         * with option "display_pv_away_once" enabled, skip the display when
         * the same away message was already shown on this pv buffer
         */
        if (!weechat_config_boolean (irc_config_look_display_pv_away_once)
            || !ptr_channel
            || !(ptr_channel->away_message)
            || (strcmp (ptr_channel->away_message, pos_away_msg) != 0))
        {
            ptr_buffer = (ptr_channel) ? ptr_channel->buffer : server->buffer;
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, argv[3], command, "whois", ptr_buffer),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, address),
                _("%s%s[%s%s%s]%s is away: %s"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_DELIMITERS,
                irc_nick_color_for_msg (server, 1, NULL, argv[3]),
                argv[3],
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_RESET,
                pos_away_msg);
            if (ptr_channel)
            {
                /* remember the away message so it is not repeated */
                if (ptr_channel->away_message)
                    free (ptr_channel->away_message);
                ptr_channel->away_message = strdup (pos_away_msg);
            }
        }
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "303": ison.
*
* Message looks like:
* :server 303 mynick :nick1 nick2
*/
IRC_PROTOCOL_CALLBACK(303)
{
    const char *ptr_nicks;

    IRC_PROTOCOL_MIN_ARGS(4);

    /* list of online nicks, without the leading ':' */
    ptr_nicks = (argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, NULL, command, NULL, NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        _("%sUsers online: %s%s"),
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_NICK,
        ptr_nicks);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "305": unaway.
*
* Message looks like:
* :server 305 mynick :Does this mean you're really back?
*/
IRC_PROTOCOL_CALLBACK(305)
{
    const char *ptr_msg;

    IRC_PROTOCOL_MIN_ARGS(3);

    /* display the "you are no longer away" text when present */
    if (argc > 3)
    {
        ptr_msg = (argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3];
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, "unaway", NULL),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s",
            weechat_prefix ("network"),
            ptr_msg);
    }

    /* mark the server as back and refresh the "away" bar item */
    server->is_away = 0;
    server->away_time = 0;
    weechat_bar_item_update ("away");

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "306": now away.
*
* Message looks like:
* :server 306 mynick :We'll miss you
*/
IRC_PROTOCOL_CALLBACK(306)
{
    const char *ptr_msg;

    IRC_PROTOCOL_MIN_ARGS(3);

    /* display the "you have been marked as away" text when present */
    if (argc > 3)
    {
        ptr_msg = (argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3];
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, "away", NULL),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s",
            weechat_prefix ("network"),
            ptr_msg);
    }

    /* mark the server as away and refresh the "away" bar item */
    server->is_away = 1;
    server->away_time = time (NULL);
    weechat_bar_item_update ("away");

    return WEECHAT_RC_OK;
}
/*
* Callback for the whois messages with nick and message.
*
* Message looks like:
* :server 319 flashy FlashCode :some text here
*/
IRC_PROTOCOL_CALLBACK(whois_nick_msg)
{
    const char *ptr_msg;
    struct t_gui_buffer *ptr_buffer;

    IRC_PROTOCOL_MIN_ARGS(5);

    /* trailing text of the whois reply, without the leading ':' */
    ptr_msg = (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4];

    ptr_buffer = irc_msgbuffer_get_target_buffer (server, argv[3], command,
                                                  "whois", NULL);

    weechat_printf_date_tags (
        ptr_buffer,
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s[%s%s%s] %s%s",
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_DELIMITERS,
        irc_nick_color_for_msg (server, 1, NULL, argv[3]),
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        ptr_msg);

    return WEECHAT_RC_OK;
}
/*
* Callback for the whowas messages with nick and message.
*
* Message looks like:
* :server 369 flashy FlashCode :some text here
*/
IRC_PROTOCOL_CALLBACK(whowas_nick_msg)
{
    const char *ptr_msg;
    struct t_gui_buffer *ptr_buffer;

    IRC_PROTOCOL_MIN_ARGS(5);

    /* trailing text of the whowas reply, without the leading ':' */
    ptr_msg = (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4];

    ptr_buffer = irc_msgbuffer_get_target_buffer (server, argv[3], command,
                                                  "whowas", NULL);

    weechat_printf_date_tags (
        ptr_buffer,
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s[%s%s%s] %s%s",
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_DELIMITERS,
        irc_nick_color_for_msg (server, 1, NULL, argv[3]),
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        ptr_msg);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "311": whois, user.
*
* Message looks like:
* :server 311 mynick nick user host * :realname here
*/
IRC_PROTOCOL_CALLBACK(311)
{
    const char *ptr_realname;

    IRC_PROTOCOL_MIN_ARGS(8);

    /* real name is the trailing parameter, without the leading ':' */
    ptr_realname = (argv_eol[7][0] == ':') ? argv_eol[7] + 1 : argv_eol[7];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, argv[3], command, "whois", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s[%s%s%s] (%s%s@%s%s)%s: %s",
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_DELIMITERS,
        irc_nick_color_for_msg (server, 1, NULL, argv[3]),
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_CHAT_HOST,
        argv[4],
        argv[5],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        ptr_realname);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "312": whois, server.
*
* Message looks like:
* :server 312 mynick nick chat.freenode.net :https://freenode.net/
*/
IRC_PROTOCOL_CALLBACK(312)
{
    const char *ptr_server_info;

    IRC_PROTOCOL_MIN_ARGS(6);

    /* server description, without the leading ':' */
    ptr_server_info = (argv_eol[5][0] == ':') ? argv_eol[5] + 1 : argv_eol[5];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, argv[3], command, "whois", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s[%s%s%s] %s%s %s(%s%s%s)",
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_DELIMITERS,
        irc_nick_color_for_msg (server, 1, NULL, argv[3]),
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        argv[4],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        ptr_server_info,
        IRC_COLOR_CHAT_DELIMITERS);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "314": whowas.
*
* Message looks like:
* :server 314 mynick nick user host * :realname here
*/
IRC_PROTOCOL_CALLBACK(314)
{
    const char *ptr_realname;

    IRC_PROTOCOL_MIN_ARGS(8);

    /* real name is the trailing parameter, without the leading ':' */
    ptr_realname = (argv_eol[7][0] == ':') ? argv_eol[7] + 1 : argv_eol[7];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, argv[3], command, "whowas", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        _("%s%s[%s%s%s] (%s%s@%s%s)%s was %s"),
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_DELIMITERS,
        irc_nick_color_for_msg (server, 1, NULL, argv[3]),
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_CHAT_HOST,
        argv[4],
        argv[5],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        ptr_realname);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "315": end of /who.
*
* Message looks like:
* :server 315 mynick #channel :End of /WHO list.
*/
IRC_PROTOCOL_CALLBACK(315)
{
    struct t_irc_channel *ptr_channel;

    IRC_PROTOCOL_MIN_ARGS(5);

    /*
     * answer to a silent WHOX refresh on the channel: consume it without
     * displaying anything
     */
    ptr_channel = irc_channel_search (server, argv[3]);
    if (ptr_channel && (ptr_channel->checking_whox > 0))
    {
        ptr_channel->checking_whox--;
        return WEECHAT_RC_OK;
    }

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command, "who", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s[%s%s%s]%s %s",
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_CHAT_CHANNEL,
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "317": whois, idle.
*
* Message looks like:
* :server 317 mynick nick 122877 1205327880 :seconds idle, signon time
*/
IRC_PROTOCOL_CALLBACK(317)
{
    int idle_time, day, hour, min, sec;
    time_t datetime;
    struct t_gui_buffer *ptr_buffer;

    IRC_PROTOCOL_MIN_ARGS(6);

    /*
     * NOTE(review): atoi/atol silently yield 0 on malformed input; with a
     * bad argv[4]/argv[5] the display shows zero idle/signon time
     */
    idle_time = atoi (argv[4]);
    /* split idle seconds into days / hours / minutes / seconds */
    day = idle_time / (60 * 60 * 24);
    hour = (idle_time % (60 * 60 * 24)) / (60 * 60);
    min = ((idle_time % (60 * 60 * 24)) % (60 * 60)) / 60;
    sec = ((idle_time % (60 * 60 * 24)) % (60 * 60)) % 60;
    datetime = (time_t)(atol (argv[5]));
    ptr_buffer = irc_msgbuffer_get_target_buffer (server, argv[3],
                                                  command, "whois", NULL);
    if (day > 0)
    {
        /* long idle: include the day count in the message */
        weechat_printf_date_tags (
            ptr_buffer,
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            _("%s%s[%s%s%s]%s idle: %s%d %s%s, %s%02d %s%s %s%02d %s%s %s%02d "
              "%s%s, signon at: %s%s"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            irc_nick_color_for_msg (server, 1, NULL, argv[3]),
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            IRC_COLOR_CHAT_CHANNEL,
            day,
            IRC_COLOR_RESET,
            NG_("day", "days", day),
            IRC_COLOR_CHAT_CHANNEL,
            hour,
            IRC_COLOR_RESET,
            NG_("hour", "hours", hour),
            IRC_COLOR_CHAT_CHANNEL,
            min,
            IRC_COLOR_RESET,
            NG_("minute", "minutes", min),
            IRC_COLOR_CHAT_CHANNEL,
            sec,
            IRC_COLOR_RESET,
            NG_("second", "seconds", sec),
            IRC_COLOR_CHAT_CHANNEL,
            weechat_util_get_time_string (&datetime));
    }
    else
    {
        /* short idle: no day count */
        weechat_printf_date_tags (
            ptr_buffer,
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            _("%s%s[%s%s%s]%s idle: %s%02d %s%s %s%02d %s%s %s%02d %s%s, "
              "signon at: %s%s"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            irc_nick_color_for_msg (server, 1, NULL, argv[3]),
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            IRC_COLOR_CHAT_CHANNEL,
            hour,
            IRC_COLOR_RESET,
            NG_("hour", "hours", hour),
            IRC_COLOR_CHAT_CHANNEL,
            min,
            IRC_COLOR_RESET,
            NG_("minute", "minutes", min),
            IRC_COLOR_CHAT_CHANNEL,
            sec,
            IRC_COLOR_RESET,
            NG_("second", "seconds", sec),
            IRC_COLOR_CHAT_CHANNEL,
            weechat_util_get_time_string (&datetime));
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "321": /list start.
*
* Message looks like:
* :server 321 mynick Channel :Users Name
*/
IRC_PROTOCOL_CALLBACK(321)
{
    const char *ptr_args;

    IRC_PROTOCOL_MIN_ARGS(4);

    /* optional text after the "Channel" header */
    ptr_args = NULL;
    if (argc > 4)
        ptr_args = (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command, "list", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s%s%s",
        weechat_prefix ("network"),
        argv[3],
        (ptr_args) ? " " : "",
        (ptr_args) ? ptr_args : "");

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "322": channel for /list.
*
* Message looks like:
* :server 322 mynick #channel 3 :topic of channel
*/
IRC_PROTOCOL_CALLBACK(322)
{
    const char *ptr_topic;

    IRC_PROTOCOL_MIN_ARGS(5);

    /* optional channel topic (trailing parameter) */
    ptr_topic = NULL;
    if (argc > 5)
        ptr_topic = (argv_eol[5][0] == ':') ? argv_eol[5] + 1 : argv_eol[5];

    /* honor the filter regexp given to /list, if any */
    if (server->cmd_list_regexp
        && (regexec (server->cmd_list_regexp, argv[3], 0, NULL, 0) != 0))
    {
        return WEECHAT_RC_OK;
    }

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command, "list", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s%s%s(%s%s%s)%s%s%s",
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_CHANNEL,
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        argv[4],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        (ptr_topic && ptr_topic[0]) ? ": " : "",
        (ptr_topic && ptr_topic[0]) ? ptr_topic : "");

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "323": end of /list.
*
* Message looks like:
* :server 323 mynick :End of /LIST
*/
IRC_PROTOCOL_CALLBACK(323)
{
    const char *ptr_args;

    IRC_PROTOCOL_MIN_ARGS(3);

    /* optional "End of /LIST" text */
    ptr_args = NULL;
    if (argc > 3)
        ptr_args = (argv_eol[3][0] == ':') ? argv_eol[3] + 1 : argv_eol[3];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, NULL, command, "list", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s",
        weechat_prefix ("network"),
        (ptr_args && ptr_args[0]) ? ptr_args : "");

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "324": channel mode.
*
* Message looks like:
* :server 324 mynick #channel +nt
*/
IRC_PROTOCOL_CALLBACK(324)
{
    const char *ptr_modes, *ptr_modes_args;
    struct t_irc_channel *ptr_channel;
    IRC_PROTOCOL_MIN_ARGS(4);
    /* modes string is argument 4, mode arguments (keys, limits, ...) are 5+ */
    ptr_modes = (argc > 4) ?
        ((argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]) : NULL;
    ptr_modes_args = (argc > 5) ?
        ((argv_eol[5][0] == ':') ? argv_eol[5] + 1 : argv_eol[5]) : NULL;
    ptr_channel = irc_channel_search (server, argv[3]);
    if (ptr_channel)
    {
        /* store the raw modes string first, then apply each mode flag */
        irc_channel_set_modes (ptr_channel, ptr_modes);
        if (argc > 4)
        {
            (void) irc_mode_channel_set (server, ptr_channel, host,
                                         ptr_modes, ptr_modes_args);
        }
    }
    /*
     * display modes unless this numeric is part of the automatic join
     * sequence and its display on join is disabled
     */
    if (!ptr_channel
        || (weechat_hashtable_has_key (ptr_channel->join_msg_received, command)
            || weechat_hashtable_has_key (irc_config_hashtable_display_join_message, command)))
    {
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, NULL,
                (ptr_channel) ? ptr_channel->buffer : NULL),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, address),
            _("%sMode %s%s %s[%s%s%s]"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_CHANNEL,
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            (ptr_modes) ? ptr_modes : "",
            IRC_COLOR_CHAT_DELIMITERS);
    }
    /* remember that this numeric was received for the channel join */
    if (ptr_channel)
        weechat_hashtable_set (ptr_channel->join_msg_received, command, "1");
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "327": whois, host.
*
* Message looks like:
* :server 327 mynick nick host ip :real hostname/ip
*/
IRC_PROTOCOL_CALLBACK(327)
{
    char *ptr_realname;
    struct t_gui_buffer *ptr_buffer;

    IRC_PROTOCOL_MIN_ARGS(6);

    /* real hostname/ip is optional (argument 6) */
    ptr_realname = NULL;
    if (argc > 6)
        ptr_realname = (argv_eol[6][0] == ':') ? argv_eol[6] + 1 : argv_eol[6];

    ptr_buffer = irc_msgbuffer_get_target_buffer (server, argv[3],
                                                  command, "whois", NULL);

    if (ptr_realname && ptr_realname[0])
    {
        weechat_printf_date_tags (
            ptr_buffer,
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s[%s%s%s] %s%s %s %s(%s%s%s)",
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            irc_nick_color_for_msg (server, 1, NULL, argv[3]),
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_CHAT_HOST,
            argv[4],
            argv[5],
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            ptr_realname,
            IRC_COLOR_CHAT_DELIMITERS);
    }
    else
    {
        weechat_printf_date_tags (
            ptr_buffer,
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s[%s%s%s] %s%s %s",
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            irc_nick_color_for_msg (server, 1, NULL, argv[3]),
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_CHAT_HOST,
            argv[4],
            argv[5]);
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "328": channel URL.
*
* Message looks like:
* :server 328 mynick #channel :https://example.com/
*/
IRC_PROTOCOL_CALLBACK(328)
{
    struct t_irc_channel *ptr_channel;
    const char *ptr_url;

    IRC_PROTOCOL_MIN_ARGS(5);

    /* displayed only if the channel is known locally */
    ptr_channel = irc_channel_search (server, argv[3]);
    if (!ptr_channel)
        return WEECHAT_RC_OK;

    ptr_url = (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command, NULL, ptr_channel->buffer),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        _("%sURL for %s%s%s: %s"),
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_CHANNEL,
        argv[3],
        IRC_COLOR_RESET,
        ptr_url);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "329": channel creation date.
*
* Message looks like:
* :server 329 mynick #channel 1205327894
*/
IRC_PROTOCOL_CALLBACK(329)
{
    struct t_irc_channel *ptr_channel;
    time_t datetime;

    IRC_PROTOCOL_MIN_ARGS(5);

    ptr_channel = irc_channel_search (server, argv[3]);
    datetime = (time_t)(atol ((argv_eol[4][0] == ':') ?
                              argv_eol[4] + 1 : argv_eol[4]));

    if (ptr_channel)
    {
        /* on a known channel: show only if join messages are displayed */
        if (weechat_hashtable_has_key (ptr_channel->join_msg_received, command)
            || weechat_hashtable_has_key (irc_config_hashtable_display_join_message, command))
        {
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, NULL, ptr_channel->buffer),
                date,
                irc_protocol_tags (command, "irc_numeric",
                                   NULL, NULL),
                /* TRANSLATORS: "%s" after "created on" is a date */
                _("%sChannel created on %s"),
                weechat_prefix ("network"),
                weechat_util_get_time_string (&datetime));
        }
        /* remember that this numeric was received for the channel join */
        weechat_hashtable_set (ptr_channel->join_msg_received, command, "1");
    }
    else
    {
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (server, NULL, command, NULL, NULL),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            /* TRANSLATORS: "%s" after "created on" is a date */
            _("%sChannel %s%s%s created on %s"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_CHANNEL,
            argv[3],
            IRC_COLOR_RESET,
            weechat_util_get_time_string (&datetime));
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC messages "330" (whois, is logged in as), and "343"
* (whois, is opered as).
*
* Messages look like:
* :server 330 mynick nick1 nick2 :is logged in as
* :server 330 mynick #channel https://example.com/
* :server 343 mynick nick1 nick2 :is opered as
*/
IRC_PROTOCOL_CALLBACK(330_343)
{
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;
    IRC_PROTOCOL_MIN_ARGS(5);
    if (argc >= 6)
    {
        /* two-nick form: "nick1 nick2 :is logged in as" */
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, argv[3], command, "whois", NULL),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s[%s%s%s] %s%s %s%s",
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            irc_nick_color_for_msg (server, 1, NULL, argv[3]),
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            (argv_eol[5][0] == ':') ? argv_eol[5] + 1 : argv_eol[5],
            irc_nick_color_for_msg (server, 1, NULL, argv[4]),
            argv[4]);
    }
    else
    {
        /* target + text form (e.g. "#channel https://example.com/") */
        ptr_channel = (irc_channel_is_channel (server, argv[3])) ?
            irc_channel_search (server, argv[3]) : NULL;
        ptr_buffer = (ptr_channel) ? ptr_channel->buffer : server->buffer;
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, argv[3], command, "whois", ptr_buffer),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s[%s%s%s] %s%s",
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            irc_nick_color_for_msg (server, 1, NULL, argv[3]),
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]);
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "331": no topic for channel.
*
* Message looks like:
* :server 331 mynick #channel :There isn't a topic.
*/
IRC_PROTOCOL_CALLBACK(331)
{
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;

    IRC_PROTOCOL_MIN_ARGS(4);

    /* fall back to the server buffer for an unknown channel */
    ptr_channel = irc_channel_search (server, argv[3]);
    if (ptr_channel)
        ptr_buffer = ptr_channel->buffer;
    else
        ptr_buffer = server->buffer;

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, argv[3], command, NULL, ptr_buffer),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        _("%sNo topic set for channel %s%s"),
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_CHANNEL,
        argv[3]);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "332": topic of channel.
*
* Message looks like:
* :server 332 mynick #channel :topic of channel
*/
IRC_PROTOCOL_CALLBACK(332)
{
    char *pos_topic, *topic_no_color, *topic_color;
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;
    IRC_PROTOCOL_MIN_ARGS(4);
    /* topic is optional (argument 4) */
    pos_topic = NULL;
    if (argc >= 5)
        pos_topic = (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4];
    ptr_channel = irc_channel_search (server, argv[3]);
    if (ptr_channel && ptr_channel->nicks)
    {
        if (pos_topic)
        {
            /* strip IRC colors before storing, unless colors are kept */
            topic_no_color = (weechat_config_boolean (irc_config_network_colors_receive)) ?
                NULL : irc_color_decode (pos_topic, 0);
            irc_channel_set_topic (ptr_channel,
                                   (topic_no_color) ? topic_no_color : pos_topic);
            if (topic_no_color)
                free (topic_no_color);
        }
        ptr_buffer = ptr_channel->buffer;
    }
    else
        ptr_buffer = server->buffer;
    /* separate decoded copy used only for display (freed below) */
    topic_color = NULL;
    if (pos_topic)
    {
        topic_color = irc_color_decode (pos_topic,
                                        (weechat_config_boolean (irc_config_network_colors_receive)) ? 1 : 0);
    }
    /*
     * display topic unless this numeric is part of the automatic join
     * sequence and its display on join is disabled
     */
    if (!ptr_channel
        || (weechat_hashtable_has_key (ptr_channel->join_msg_received, command))
        || weechat_hashtable_has_key (irc_config_hashtable_display_join_message, command))
    {
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, NULL, ptr_buffer),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            _("%sTopic for %s%s%s is \"%s%s%s\""),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_CHANNEL,
            argv[3],
            IRC_COLOR_RESET,
            IRC_COLOR_TOPIC_CURRENT,
            (topic_color) ? topic_color : ((pos_topic) ? pos_topic : ""),
            IRC_COLOR_RESET);
    }
    if (topic_color)
        free (topic_color);
    /* remember that this numeric was received for the channel join */
    if (ptr_channel)
        weechat_hashtable_set (ptr_channel->join_msg_received, command, "1");
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "333": infos about topic (nick / date).
*
* Message looks like:
* :server 333 mynick #channel nick!user@host 1205428096
* :server 333 mynick #channel 1205428096
*/
IRC_PROTOCOL_CALLBACK(333)
{
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;
    time_t datetime;
    const char *topic_nick, *topic_address;
    int arg_date;
    IRC_PROTOCOL_MIN_ARGS(5);
    /* with 6+ args, argument 4 is "nick!user@host" of who set the topic */
    topic_nick = (argc > 5) ? irc_message_get_nick_from_host (argv[4]) : NULL;
    topic_address = (argc > 5) ? irc_message_get_address_from_host (argv[4]) : NULL;
    /* address equal to nick means there was no real address in the host */
    if (topic_nick && topic_address && strcmp (topic_nick, topic_address) == 0)
        topic_address = NULL;
    ptr_channel = irc_channel_search (server, argv[3]);
    ptr_nick = (ptr_channel) ?
        irc_nick_search (server, ptr_channel, topic_nick) : NULL;
    /* the date is the last argument: index 5 when a nick is present, else 4 */
    arg_date = (argc > 5) ? 5 : 4;
    datetime = (time_t)(atol ((argv_eol[arg_date][0] == ':') ?
                              argv_eol[arg_date] + 1 : argv_eol[arg_date]));
    /* nothing usable to display */
    if (!topic_nick && (datetime == 0))
        return WEECHAT_RC_OK;
    if (ptr_channel && ptr_channel->nicks)
    {
        /* display only if join messages for this numeric are shown */
        if (weechat_hashtable_has_key (ptr_channel->join_msg_received, command)
            || weechat_hashtable_has_key (irc_config_hashtable_display_join_message, command))
        {
            if (topic_nick)
            {
                weechat_printf_date_tags (
                    irc_msgbuffer_get_target_buffer (
                        server, NULL, command, NULL, ptr_channel->buffer),
                    date,
                    irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                    /* TRANSLATORS: "%s" after "on" is a date */
                    _("%sTopic set by %s%s%s%s%s%s%s%s%s on %s"),
                    weechat_prefix ("network"),
                    irc_nick_color_for_msg (server, 1, ptr_nick, topic_nick),
                    topic_nick,
                    IRC_COLOR_CHAT_DELIMITERS,
                    (topic_address && topic_address[0]) ? " (" : "",
                    IRC_COLOR_CHAT_HOST,
                    (topic_address) ? topic_address : "",
                    IRC_COLOR_CHAT_DELIMITERS,
                    (topic_address && topic_address[0]) ? ")" : "",
                    IRC_COLOR_RESET,
                    weechat_util_get_time_string (&datetime));
            }
            else
            {
                weechat_printf_date_tags (
                    irc_msgbuffer_get_target_buffer (
                        server, NULL, command, NULL, ptr_channel->buffer),
                    date,
                    irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                    /* TRANSLATORS: "%s" after "on" is a date */
                    _("%sTopic set on %s"),
                    weechat_prefix ("network"),
                    weechat_util_get_time_string (&datetime));
            }
        }
    }
    else
    {
        /* channel not joined: display on server buffer, with channel name */
        if (topic_nick)
        {
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, NULL, NULL),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                /* TRANSLATORS: "%s" after "on" is a date */
                _("%sTopic for %s%s%s set by %s%s%s%s%s%s%s%s%s on %s"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_CHANNEL,
                argv[3],
                IRC_COLOR_RESET,
                irc_nick_color_for_msg (server, 1, ptr_nick, topic_nick),
                topic_nick,
                IRC_COLOR_CHAT_DELIMITERS,
                (topic_address && topic_address[0]) ? " (" : "",
                IRC_COLOR_CHAT_HOST,
                (topic_address) ? topic_address : "",
                IRC_COLOR_CHAT_DELIMITERS,
                (topic_address && topic_address[0]) ? ")" : "",
                IRC_COLOR_RESET,
                weechat_util_get_time_string (&datetime));
        }
        else
        {
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, NULL, NULL),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                /* TRANSLATORS: "%s" after "on" is a date */
                _("%sTopic for %s%s%s set on %s"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_CHANNEL,
                argv[3],
                IRC_COLOR_RESET,
                weechat_util_get_time_string (&datetime));
        }
    }
    /* remember that this numeric was received for the channel join */
    if (ptr_channel)
        weechat_hashtable_set (ptr_channel->join_msg_received, command, "1");
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "338": whois, host.
*
* Message looks like:
* :server 338 mynick nick host :actually using host
*/
IRC_PROTOCOL_CALLBACK(338)
{
    const char *ptr_text;

    IRC_PROTOCOL_MIN_ARGS(6);

    ptr_text = (argv_eol[5][0] == ':') ? argv_eol[5] + 1 : argv_eol[5];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, argv[3], command, "whois", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s[%s%s%s]%s %s %s%s",
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_DELIMITERS,
        irc_nick_color_for_msg (server, 1, NULL, argv[3]),
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        ptr_text,
        IRC_COLOR_CHAT_HOST,
        argv[4]);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "341": inviting.
*
* Message looks like:
* :server 341 mynick nick #channel
* :server 341 mynick nick :#channel
*/
IRC_PROTOCOL_CALLBACK(341)
{
    const char *ptr_channel_name;

    IRC_PROTOCOL_MIN_ARGS(5);

    /* channel may be sent as trailing parameter (with leading ':') */
    ptr_channel_name = (argv[4][0] == ':') ? argv[4] + 1 : argv[4];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, argv[2], command, NULL, NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", argv[2], address),
        _("%s%s%s%s has invited %s%s%s to %s%s%s"),
        weechat_prefix ("network"),
        irc_nick_color_for_msg (server, 1, NULL, argv[2]),
        argv[2],
        IRC_COLOR_RESET,
        irc_nick_color_for_msg (server, 1, NULL, argv[3]),
        argv[3],
        IRC_COLOR_RESET,
        IRC_COLOR_CHAT_CHANNEL,
        ptr_channel_name,
        IRC_COLOR_RESET);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "344": channel reop.
*
* Message looks like:
* :server 344 mynick #channel nick!user@host
*/
IRC_PROTOCOL_CALLBACK(344)
{
    const char *ptr_host;

    IRC_PROTOCOL_MIN_ARGS(5);

    ptr_host = (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, NULL, command, "reop", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        _("%sChannel reop %s%s%s: %s%s"),
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_CHANNEL,
        argv[3],
        IRC_COLOR_RESET,
        IRC_COLOR_CHAT_HOST,
        ptr_host);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "345": end of channel reop.
*
* Message looks like:
* :server 345 mynick #channel :End of Channel Reop List
*/
IRC_PROTOCOL_CALLBACK(345)
{
    const char *ptr_text;

    IRC_PROTOCOL_MIN_ARGS(5);

    ptr_text = (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, NULL, command, "reop", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s%s%s: %s",
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_CHANNEL,
        argv[3],
        IRC_COLOR_RESET,
        ptr_text);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "346": channel invite list.
*
* Message looks like:
* :server 346 mynick #channel invitemask nick!user@host 1205590879
* :server 346 mynick #channel invitemask
*/
IRC_PROTOCOL_CALLBACK(346)
{
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;
    struct t_irc_modelist *ptr_modelist;
    time_t datetime;
    const char *nick_address;
    char str_number[64];
    IRC_PROTOCOL_MIN_ARGS(5);
    ptr_channel = irc_channel_search (server, argv[3]);
    ptr_buffer = (ptr_channel && ptr_channel->nicks) ?
        ptr_channel->buffer : server->buffer;
    /* cached invite list ('I' mode) for this channel, if any */
    ptr_modelist = irc_modelist_search (ptr_channel, 'I');
    if (ptr_modelist)
    {
        /* start receiving new list */
        if (ptr_modelist->state != IRC_MODELIST_STATE_RECEIVING)
        {
            irc_modelist_item_free_all (ptr_modelist);
            ptr_modelist->state = IRC_MODELIST_STATE_RECEIVING;
        }
        /* "[N] " prefix, N = 1-based number of the incoming item */
        snprintf (str_number, sizeof (str_number),
                  "%s[%s%d%s] ",
                  IRC_COLOR_CHAT_DELIMITERS,
                  IRC_COLOR_RESET,
                  ((ptr_modelist->last_item) ? ptr_modelist->last_item->number + 1 : 0) + 1,
                  IRC_COLOR_CHAT_DELIMITERS);
    }
    else
        str_number[0] = '\0';
    if (argc >= 6)
    {
        /* argument 5 = host of the nick who set the invitation */
        nick_address = irc_protocol_nick_address (
            server, 1, NULL, irc_message_get_nick_from_host (argv[5]),
            irc_message_get_address_from_host (argv[5]));
        if (argc >= 7)
        {
            /* argument 6 = timestamp of the invitation */
            datetime = (time_t)(atol ((argv[6][0] == ':') ? argv[6] + 1 : argv[6]));
            if (ptr_modelist)
                irc_modelist_item_new (ptr_modelist, argv[4], argv[5], datetime);
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, "invitelist", ptr_buffer),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                /* TRANSLATORS: "%s" after "on" is a date */
                _("%s%s[%s%s%s] %s%s%s%s invited by %s on %s"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_CHAT_CHANNEL,
                argv[3],
                IRC_COLOR_CHAT_DELIMITERS,
                str_number,
                IRC_COLOR_CHAT_HOST,
                argv[4],
                IRC_COLOR_RESET,
                (nick_address[0]) ? nick_address : "?",
                weechat_util_get_time_string (&datetime));
        }
        else
        {
            if (ptr_modelist)
                irc_modelist_item_new (ptr_modelist, argv[4], argv[5], 0);
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, "invitelist", ptr_buffer),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                _("%s%s[%s%s%s] %s%s%s%s invited by %s"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_CHAT_CHANNEL,
                argv[3],
                IRC_COLOR_CHAT_DELIMITERS,
                str_number,
                IRC_COLOR_CHAT_HOST,
                argv[4],
                IRC_COLOR_RESET,
                (nick_address[0]) ? nick_address : "?");
        }
    }
    else
    {
        /* no setter host known for this invite mask */
        if (ptr_modelist)
            irc_modelist_item_new (ptr_modelist, argv[4], NULL, 0);
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, "invitelist", ptr_buffer),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            _("%s%s[%s%s%s] %s%s%s%s invited"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_CHAT_CHANNEL,
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            str_number,
            IRC_COLOR_CHAT_HOST,
            argv[4],
            IRC_COLOR_RESET);
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "347": end of channel invite list.
*
* Message looks like:
* :server 347 mynick #channel :End of Channel Invite List
*/
IRC_PROTOCOL_CALLBACK(347)
{
    char *pos_args;
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;
    struct t_irc_modelist *ptr_modelist;
    IRC_PROTOCOL_MIN_ARGS(4);
    /* optional trailing text (argument 4) */
    pos_args = (argc > 4) ?
        ((argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]) : NULL;
    ptr_channel = irc_channel_search (server, argv[3]);
    ptr_buffer = (ptr_channel && ptr_channel->nicks) ?
        ptr_channel->buffer : server->buffer;
    /* cached invite list ('I' mode) for this channel, if any */
    ptr_modelist = irc_modelist_search (ptr_channel, 'I');
    if (ptr_modelist)
    {
        if (ptr_modelist->state != IRC_MODELIST_STATE_RECEIVING)
        {
            /*
             * remove all items if no invite was received before
             * the end of invite list
             */
            irc_modelist_item_free_all (ptr_modelist);
        }
        ptr_modelist->state = IRC_MODELIST_STATE_RECEIVED;
    }
    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command, "invitelist", ptr_buffer),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s[%s%s%s]%s%s%s",
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_CHAT_CHANNEL,
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        (pos_args) ? " " : "",
        (pos_args) ? pos_args : "");
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "348": channel exception list.
*
* Message looks like:
* :server 348 mynick #channel nick1!user1@host1 nick2!user2@host2 1205585109
* (nick2 is nick who set exception on nick1)
*/
IRC_PROTOCOL_CALLBACK(348)
{
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;
    struct t_irc_modelist *ptr_modelist;
    time_t datetime;
    const char *nick_address;
    char str_number[64];
    IRC_PROTOCOL_MIN_ARGS(5);
    ptr_channel = irc_channel_search (server, argv[3]);
    ptr_buffer = (ptr_channel && ptr_channel->nicks) ?
        ptr_channel->buffer : server->buffer;
    /* cached exception list ('e' mode) for this channel, if any */
    ptr_modelist = irc_modelist_search (ptr_channel, 'e');
    if (ptr_modelist)
    {
        /* start receiving new list */
        if (ptr_modelist->state != IRC_MODELIST_STATE_RECEIVING)
        {
            irc_modelist_item_free_all (ptr_modelist);
            ptr_modelist->state = IRC_MODELIST_STATE_RECEIVING;
        }
        /* " [N]" suffix, N = 1-based number of the incoming item */
        snprintf (str_number, sizeof (str_number),
                  " %s[%s%d%s]",
                  IRC_COLOR_CHAT_DELIMITERS,
                  IRC_COLOR_RESET,
                  ((ptr_modelist->last_item) ? ptr_modelist->last_item->number + 1 : 0) + 1,
                  IRC_COLOR_CHAT_DELIMITERS);
    }
    else
        str_number[0] = '\0';
    if (argc >= 6)
    {
        /* argument 5 = host of the nick who set the exception */
        nick_address = irc_protocol_nick_address (
            server, 1, NULL, irc_message_get_nick_from_host (argv[5]),
            irc_message_get_address_from_host (argv[5]));
        if (argc >= 7)
        {
            /* argument 6 = timestamp of the exception */
            datetime = (time_t)(atol ((argv[6][0] == ':') ? argv[6] + 1 : argv[6]));
            if (ptr_modelist)
                irc_modelist_item_new (ptr_modelist, argv[4], argv[5], datetime);
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, "exceptionlist", ptr_buffer),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                /* TRANSLATORS: "%s" after "on" is a date */
                _("%s%s[%s%s%s]%s%s exception %s%s%s by %s on %s"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_CHAT_CHANNEL,
                argv[3],
                IRC_COLOR_CHAT_DELIMITERS,
                str_number,
                IRC_COLOR_RESET,
                IRC_COLOR_CHAT_HOST,
                argv[4],
                IRC_COLOR_RESET,
                (nick_address[0]) ? nick_address : "?",
                weechat_util_get_time_string (&datetime));
        }
        else
        {
            if (ptr_modelist)
                irc_modelist_item_new (ptr_modelist, argv[4], argv[5], 0);
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, "exceptionlist", ptr_buffer),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                _("%s%s[%s%s%s]%s%s exception %s%s%s by %s"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_CHAT_CHANNEL,
                argv[3],
                IRC_COLOR_CHAT_DELIMITERS,
                str_number,
                IRC_COLOR_RESET,
                IRC_COLOR_CHAT_HOST,
                argv[4],
                IRC_COLOR_RESET,
                (nick_address[0]) ? nick_address : "?");
        }
    }
    else
    {
        /* no setter host known for this exception mask */
        if (ptr_modelist)
            irc_modelist_item_new (ptr_modelist, argv[4], NULL, 0);
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, "exceptionlist", ptr_buffer),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            _("%s%s[%s%s%s]%s%s exception %s%s"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_CHAT_CHANNEL,
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            str_number,
            IRC_COLOR_RESET,
            IRC_COLOR_CHAT_HOST,
            argv[4]);
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "349": end of channel exception list.
*
* Message looks like:
* :server 349 mynick #channel :End of Channel Exception List
*/
IRC_PROTOCOL_CALLBACK(349)
{
    char *pos_args;
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;
    struct t_irc_modelist *ptr_modelist;
    IRC_PROTOCOL_MIN_ARGS(4);
    /* optional trailing text (argument 4) */
    pos_args = (argc > 4) ?
        ((argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]) : NULL;
    ptr_channel = irc_channel_search (server, argv[3]);
    ptr_buffer = (ptr_channel && ptr_channel->nicks) ?
        ptr_channel->buffer : server->buffer;
    /* cached exception list ('e' mode) for this channel, if any */
    ptr_modelist = irc_modelist_search (ptr_channel, 'e');
    if (ptr_modelist)
    {
        if (ptr_modelist->state != IRC_MODELIST_STATE_RECEIVING)
        {
            /*
             * remove all items if no exception was received before
             * the end of exception list
             */
            irc_modelist_item_free_all (ptr_modelist);
        }
        ptr_modelist->state = IRC_MODELIST_STATE_RECEIVED;
    }
    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command, "exceptionlist", ptr_buffer),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s[%s%s%s]%s%s%s",
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_CHAT_CHANNEL,
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        (pos_args) ? " " : "",
        (pos_args) ? pos_args : "");
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "351": server version.
*
* Message looks like:
* :server 351 mynick dancer-ircd-1.0.36(2006/07/23_13:11:50). server :iMZ dncrTS/v4
*/
IRC_PROTOCOL_CALLBACK(351)
{
    struct t_gui_buffer *ptr_buffer;
    const char *ptr_comments;

    IRC_PROTOCOL_MIN_ARGS(5);

    ptr_buffer = irc_msgbuffer_get_target_buffer (server, NULL, command, NULL,
                                                  NULL);

    /* optional trailing comments (argument 5) */
    ptr_comments = NULL;
    if (argc > 5)
        ptr_comments = (argv_eol[5][0] == ':') ? argv_eol[5] + 1 : argv_eol[5];

    if (ptr_comments)
    {
        weechat_printf_date_tags (
            ptr_buffer,
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s %s (%s)",
            weechat_prefix ("network"),
            argv[3],
            argv[4],
            ptr_comments);
    }
    else
    {
        weechat_printf_date_tags (
            ptr_buffer,
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s %s",
            weechat_prefix ("network"),
            argv[3],
            argv[4]);
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "352": who.
*
* Message looks like:
* :server 352 mynick #channel user host server nick (*) (H/G) :0 flashcode
*/
IRC_PROTOCOL_CALLBACK(352)
{
    char *pos_attr, *pos_hopcount, *pos_realname, *str_host;
    int arg_start, length;
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;

    IRC_PROTOCOL_MIN_ARGS(5);

    /* silently ignore malformed 352 message (missing infos) */
    if (argc < 8)
        return WEECHAT_RC_OK;

    pos_attr = NULL;
    pos_hopcount = NULL;
    pos_realname = NULL;

    if (argc > 8)
    {
        /* some servers insert an extra "*" as argument 8 */
        arg_start = (strcmp (argv[8], "*") == 0) ? 9 : 8;
        /*
         * bounds fix: if argv[8] is "*" and it is the last argument,
         * arg_start == argc and argv[arg_start] must not be dereferenced
         * (the original code read it unconditionally)
         */
        if (argc > arg_start)
        {
            if (argv[arg_start][0] == ':')
            {
                /* ":hopcount realname ..." */
                pos_attr = NULL;
                pos_hopcount = argv[arg_start] + 1;
                pos_realname = (argc > arg_start + 1) ? argv_eol[arg_start + 1] : NULL;
            }
            else
            {
                /* "H/G flags, then :hopcount realname ..." */
                pos_attr = argv[arg_start];
                pos_hopcount = (argc > arg_start + 1) ? argv[arg_start + 1] + 1 : NULL;
                pos_realname = (argc > arg_start + 2) ? argv_eol[arg_start + 2] : NULL;
            }
        }
    }

    ptr_channel = irc_channel_search (server, argv[3]);
    ptr_nick = (ptr_channel) ?
        irc_nick_search (server, ptr_channel, argv[7]) : NULL;

    /* update host in nick ("user@host") */
    if (ptr_nick)
    {
        length = strlen (argv[4]) + 1 + strlen (argv[5]) + 1;
        str_host = malloc (length);
        if (str_host)
        {
            snprintf (str_host, length, "%s@%s", argv[4], argv[5]);
            irc_nick_set_host (ptr_nick, str_host);
            free (str_host);
        }
    }

    /* update away flag in nick ('G' == gone/away) */
    if (ptr_channel && ptr_nick && pos_attr)
    {
        irc_nick_set_away (server, ptr_channel, ptr_nick,
                           (pos_attr[0] == 'G') ? 1 : 0);
    }

    /*
     * update realname in nick; kept only when the "extended-join"
     * capability is enabled (redundant inner NULL check removed)
     */
    if (ptr_channel && ptr_nick && pos_realname)
    {
        if (ptr_nick->realname)
            free (ptr_nick->realname);
        if (weechat_hashtable_has_key (server->cap_list, "extended-join"))
        {
            ptr_nick->realname = strdup (pos_realname);
        }
        else
        {
            ptr_nick->realname = NULL;
        }
    }

    /* display output of who (manual who from user) */
    if (!ptr_channel || (ptr_channel->checking_whox <= 0))
    {
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, "who", NULL),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s[%s%s%s] %s%s %s(%s%s@%s%s)%s %s%s%s%s(%s)",
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_CHAT_CHANNEL,
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            irc_nick_color_for_msg (server, 1, NULL, argv[7]),
            argv[7],
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_CHAT_HOST,
            argv[4],
            argv[5],
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            (pos_attr) ? pos_attr : "",
            (pos_attr) ? " " : "",
            (pos_hopcount) ? pos_hopcount : "",
            (pos_hopcount) ? " " : "",
            (pos_realname) ? pos_realname : "");
    }

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "353": list of users on a channel.
*
* Message looks like:
* :server 353 mynick = #channel :mynick nick1 @nick2 +nick3
*/
IRC_PROTOCOL_CALLBACK(353)
{
    char *pos_channel, *pos_nick, *pos_nick_orig, *pos_host, *nickname;
    char *prefixes, *str_nicks, *color;
    int args, i, length;
    struct t_irc_channel *ptr_channel;
    IRC_PROTOCOL_MIN_ARGS(5);
    /* a channel type indicator ("=", "*", "@") may precede the channel */
    if (irc_channel_is_channel (server, argv[3]))
    {
        pos_channel = argv[3];
        args = 4;
    }
    else
    {
        pos_channel = argv[4];
        args = 5;
    }
    IRC_PROTOCOL_MIN_ARGS(args + 1);
    ptr_channel = irc_channel_search (server, pos_channel);
    str_nicks = NULL;
    /*
     * for a channel without buffer, prepare a string that will be built
     * with nicks and colors (argc - args is the number of nicks)
     */
    if (!ptr_channel)
    {
        /*
         * prefix color (16) + nick color (16) + reset color (16) = 48 bytes
         * added for each nick
         */
        length = strlen (argv_eol[args]) + ((argc - args) * (16 + 16 + 16)) + 1;
        str_nicks = malloc (length);
        if (str_nicks)
            str_nicks[0] = '\0';
    }
    for (i = args; i < argc; i++)
    {
        pos_nick = (argv[i][0] == ':') ? argv[i] + 1 : argv[i];
        pos_nick_orig = pos_nick;
        /* skip and save prefix(es) */
        while (pos_nick[0]
               && (irc_server_get_prefix_char_index (server, pos_nick[0]) >= 0))
        {
            pos_nick++;
        }
        prefixes = (pos_nick > pos_nick_orig) ?
            weechat_strndup (pos_nick_orig, pos_nick - pos_nick_orig) : NULL;
        /* extract nick from host */
        pos_host = strchr (pos_nick, '!');
        if (pos_host)
        {
            nickname = weechat_strndup (pos_nick, pos_host - pos_nick);
            pos_host++;
        }
        else
            nickname = strdup (pos_nick);
        /* add or update nick on channel */
        if (nickname)
        {
            if (ptr_channel && ptr_channel->nicks)
            {
                if (!irc_nick_new (server, ptr_channel, nickname, pos_host,
                                   prefixes, 0, NULL, NULL))
                {
                    weechat_printf (
                        server->buffer,
                        _("%s%s: cannot create nick \"%s\" for channel \"%s\""),
                        weechat_prefix ("error"), IRC_PLUGIN_NAME, nickname,
                        ptr_channel->name);
                }
            }
            else if (!ptr_channel && str_nicks)
            {
                /* no channel buffer: append colored nick to display string */
                if (str_nicks[0])
                {
                    strcat (str_nicks, IRC_COLOR_RESET);
                    strcat (str_nicks, " ");
                }
                if (prefixes)
                {
                    strcat (str_nicks,
                            weechat_color (
                                irc_nick_get_prefix_color_name (server,
                                                                prefixes[0])));
                    strcat (str_nicks, prefixes);
                }
                if (weechat_config_boolean (irc_config_look_color_nicks_in_names))
                {
                    if (irc_server_strcasecmp (server, nickname, server->nick) == 0)
                        strcat (str_nicks, IRC_COLOR_CHAT_NICK_SELF);
                    else
                    {
                        color = irc_nick_find_color (nickname);
                        /*
                         * NOTE(review): strcat with a NULL color would be UB;
                         * the guard below suggests irc_nick_find_color can
                         * return NULL — verify in irc-nick.c
                         */
                        strcat (str_nicks, color);
                        if (color)
                            free (color);
                    }
                }
                else
                    strcat (str_nicks, IRC_COLOR_RESET);
                strcat (str_nicks, nickname);
            }
            free (nickname);
        }
        if (prefixes)
            free (prefixes);
    }
    if (!ptr_channel)
    {
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, "names", NULL),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            _("%sNicks %s%s%s: %s[%s%s%s]"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_CHANNEL,
            pos_channel,
            IRC_COLOR_RESET,
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            (str_nicks) ? str_nicks : "",
            IRC_COLOR_CHAT_DELIMITERS);
    }
    if (str_nicks)
        free (str_nicks);
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "354": WHOX output
*
* Message looks like:
* :server 354 mynick #channel user host server nick status hopcount account :GECOS Information
*/
IRC_PROTOCOL_CALLBACK(354)
{
    char *pos_attr, *pos_hopcount, *pos_account, *pos_realname, *str_host;
    int length;
    struct t_irc_channel *ptr_channel;
    struct t_irc_nick *ptr_nick;
    IRC_PROTOCOL_MIN_ARGS(4);
    ptr_channel = irc_channel_search (server, argv[3]);
    /*
     * if there are less than 11 arguments, we are unable to parse the message,
     * some infos are missing but we don't know which ones; in this case we
     * just display the message as-is
     */
    if (argc < 11)
    {
        if (!ptr_channel || (ptr_channel->checking_whox <= 0))
        {
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, "who", NULL),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                "%s%s[%s%s%s]%s%s%s",
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_CHAT_CHANNEL,
                argv[3],
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_RESET,
                (argc > 4) ? " " : "",
                (argc > 4) ? argv_eol[4] : "");
        }
        return WEECHAT_RC_OK;
    }
    ptr_nick = (ptr_channel) ?
        irc_nick_search (server, ptr_channel, argv[7]) : NULL;
    /* fixed argument positions in the WHOX reply */
    pos_attr = argv[8];
    pos_hopcount = argv[9];
    /* account "0" means "not logged in" */
    pos_account = (strcmp (argv[10], "0") != 0) ? argv[10] : NULL;
    pos_realname = (argc > 11) ?
        ((argv_eol[11][0] == ':') ? argv_eol[11] + 1 : argv_eol[11]) : NULL;
    /* update host in nick ("user@host") */
    if (ptr_nick)
    {
        length = strlen (argv[4]) + 1 + strlen (argv[5]) + 1;
        str_host = malloc (length);
        if (str_host)
        {
            snprintf (str_host, length, "%s@%s", argv[4], argv[5]);
            irc_nick_set_host (ptr_nick, str_host);
            free (str_host);
        }
    }
    /* update away flag in nick ('G' == gone/away) */
    if (ptr_channel && ptr_nick)
    {
        irc_nick_set_away (server, ptr_channel, ptr_nick,
                           (pos_attr && (pos_attr[0] == 'G')) ? 1 : 0);
    }
    /* update account in nick (kept only with "account-notify" capability) */
    if (ptr_nick)
    {
        if (ptr_nick->account)
            free (ptr_nick->account);
        if (ptr_channel && pos_account
            && weechat_hashtable_has_key (server->cap_list, "account-notify"))
        {
            ptr_nick->account = strdup (pos_account);
        }
        else
        {
            ptr_nick->account = NULL;
        }
    }
    /* update realname in nick (kept only with "extended-join" capability) */
    if (ptr_nick)
    {
        if (ptr_nick->realname)
            free (ptr_nick->realname);
        if (ptr_channel && pos_realname
            && weechat_hashtable_has_key (server->cap_list, "extended-join"))
        {
            ptr_nick->realname = strdup (pos_realname);
        }
        else
        {
            ptr_nick->realname = NULL;
        }
    }
    /* display output of who (manual who from user) */
    if (!ptr_channel || (ptr_channel->checking_whox <= 0))
    {
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, "who", NULL),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s[%s%s%s] %s%s %s%s%s%s%s%s(%s%s@%s%s)%s %s%s%s%s(%s)",
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_CHAT_CHANNEL,
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            irc_nick_color_for_msg (server, 1, NULL, argv[7]),
            argv[7],
            IRC_COLOR_CHAT_DELIMITERS,
            (pos_account) ? "[" : "",
            (pos_account) ? IRC_COLOR_CHAT_HOST : "",
            (pos_account) ? pos_account : "",
            (pos_account) ? IRC_COLOR_CHAT_DELIMITERS : "",
            (pos_account) ? "] " : "",
            IRC_COLOR_CHAT_HOST,
            argv[4],
            argv[5],
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_RESET,
            (pos_attr) ? pos_attr : "",
            (pos_attr) ? " " : "",
            (pos_hopcount) ? pos_hopcount : "",
            (pos_hopcount) ? " " : "",
            (pos_realname) ? pos_realname : "");
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "366": end of /names list.
*
* Message looks like:
* :server 366 mynick #channel :End of /NAMES list.
*/
IRC_PROTOCOL_CALLBACK(366)
{
    struct t_irc_channel *ptr_channel;
    struct t_infolist *infolist;
    struct t_config_option *ptr_option;
    int num_nicks, num_op, num_halfop, num_voice, num_normal, length, i;
    char *string, str_nicks_count[2048], *color;
    const char *prefix, *prefix_color, *nickname;
    IRC_PROTOCOL_MIN_ARGS(5);
    ptr_channel = irc_channel_search (server, argv[3]);
    if (ptr_channel && ptr_channel->nicks)
    {
        /* display users on channel */
        if (weechat_hashtable_has_key (ptr_channel->join_msg_received, "353")
            || weechat_hashtable_has_key (irc_config_hashtable_display_join_message, "353"))
        {
            infolist = weechat_infolist_get ("nicklist", ptr_channel->buffer, NULL);
            if (infolist)
            {
                /*
                 * first pass on the nicklist: compute an upper bound of the
                 * length needed for the colored list of nicks
                 * (per nick: prefix color + prefix + nick color + nick
                 * + reset color + space)
                 */
                length = 0;
                while (weechat_infolist_next (infolist))
                {
                    if (strcmp (weechat_infolist_string (infolist, "type"),
                                "nick") == 0)
                    {
                        ptr_option = weechat_config_get (weechat_infolist_string (infolist,
                                                                                  "prefix_color"));
                        /*
                         * NOTE(review): when "prefix_color" is a direct color
                         * name (no "."), ptr_option is NULL and 0 bytes are
                         * counted here, while the second pass concatenates
                         * weechat_color (prefix_color) — confirm the color
                         * string always fits in the remaining slack
                         */
                        length +=
                            ((ptr_option) ? strlen (weechat_color (weechat_config_string (ptr_option))) : 0) +
                            strlen (weechat_infolist_string (infolist, "prefix")) +
                            16 + /* nick color */
                            strlen (weechat_infolist_string (infolist, "name")) +
                            16 + /* reset color */
                            1; /* space */
                    }
                }
                if (length > 0)
                {
                    string = malloc (length);
                    if (string)
                    {
                        /*
                         * second pass: build the colored list of nicks
                         * (the infolist cursor is presumably reset to the
                         * beginning after the first pass reached the end —
                         * TODO confirm weechat_infolist_next semantics)
                         */
                        string[0] = '\0';
                        i = 0;
                        while (weechat_infolist_next (infolist))
                        {
                            if (strcmp (weechat_infolist_string (infolist, "type"),
                                        "nick") == 0)
                            {
                                /* separate nicks with reset color + space */
                                if (i > 0)
                                {
                                    strcat (string, IRC_COLOR_RESET);
                                    strcat (string, " ");
                                }
                                prefix = weechat_infolist_string (infolist, "prefix");
                                if (prefix[0] && (prefix[0] != ' '))
                                {
                                    /*
                                     * "prefix_color" is either an option name
                                     * (contains ".") or a direct color name
                                     */
                                    prefix_color = weechat_infolist_string (infolist,
                                                                            "prefix_color");
                                    if (strchr (prefix_color, '.'))
                                    {
                                        ptr_option = weechat_config_get (weechat_infolist_string (infolist,
                                                                                                  "prefix_color"));
                                        if (ptr_option)
                                            strcat (string, weechat_color (weechat_config_string (ptr_option)));
                                    }
                                    else
                                    {
                                        strcat (string, weechat_color (prefix_color));
                                    }
                                    strcat (string, prefix);
                                }
                                nickname = weechat_infolist_string (infolist, "name");
                                if (weechat_config_boolean (irc_config_look_color_nicks_in_names))
                                {
                                    if (irc_server_strcasecmp (server, nickname, server->nick) == 0)
                                        strcat (string, IRC_COLOR_CHAT_NICK_SELF);
                                    else
                                    {
                                        /*
                                         * irc_nick_find_color returns an
                                         * allocated string which can be NULL:
                                         * guard before strcat (was an
                                         * unconditional strcat => NULL deref)
                                         */
                                        color = irc_nick_find_color (nickname);
                                        if (color)
                                        {
                                            strcat (string, color);
                                            free (color);
                                        }
                                        else
                                        {
                                            strcat (string, IRC_COLOR_RESET);
                                        }
                                    }
                                }
                                else
                                    strcat (string, IRC_COLOR_RESET);
                                strcat (string, nickname);
                                i++;
                            }
                        }
                        weechat_printf_date_tags (
                            irc_msgbuffer_get_target_buffer (
                                server, NULL, command, "names",
                                ptr_channel->buffer),
                            date,
                            irc_protocol_tags (
                                command, "irc_numeric", NULL, NULL),
                            _("%sNicks %s%s%s: %s[%s%s]"),
                            weechat_prefix ("network"),
                            IRC_COLOR_CHAT_CHANNEL,
                            ptr_channel->name,
                            IRC_COLOR_RESET,
                            IRC_COLOR_CHAT_DELIMITERS,
                            string,
                            IRC_COLOR_CHAT_DELIMITERS);
                        free (string);
                    }
                }
                weechat_infolist_free (infolist);
            }
        }
        /* display number of nicks, ops, halfops & voices on the channel */
        if (weechat_hashtable_has_key (ptr_channel->join_msg_received, "366")
            || weechat_hashtable_has_key (irc_config_hashtable_display_join_message, "366"))
        {
            irc_nick_count (server, ptr_channel, &num_nicks, &num_op, &num_halfop,
                            &num_voice, &num_normal);
            /* build "N ops, N halfops, N voices, N normals", skipping modes
               not supported by the server */
            str_nicks_count[0] = '\0';
            if (irc_server_get_prefix_mode_index (server, 'o') >= 0)
            {
                length = strlen (str_nicks_count);
                snprintf (str_nicks_count + length,
                          sizeof (str_nicks_count) - length,
                          "%s%s%d%s %s",
                          (str_nicks_count[0]) ? ", " : "",
                          IRC_COLOR_CHAT_CHANNEL,
                          num_op,
                          IRC_COLOR_RESET,
                          NG_("op", "ops", num_op));
            }
            if (irc_server_get_prefix_mode_index (server, 'h') >= 0)
            {
                length = strlen (str_nicks_count);
                snprintf (str_nicks_count + length,
                          sizeof (str_nicks_count) - length,
                          "%s%s%d%s %s",
                          (str_nicks_count[0]) ? ", " : "",
                          IRC_COLOR_CHAT_CHANNEL,
                          num_halfop,
                          IRC_COLOR_RESET,
                          NG_("halfop", "halfops", num_halfop));
            }
            if (irc_server_get_prefix_mode_index (server, 'v') >= 0)
            {
                length = strlen (str_nicks_count);
                snprintf (str_nicks_count + length,
                          sizeof (str_nicks_count) - length,
                          "%s%s%d%s %s",
                          (str_nicks_count[0]) ? ", " : "",
                          IRC_COLOR_CHAT_CHANNEL,
                          num_voice,
                          IRC_COLOR_RESET,
                          NG_("voice", "voices", num_voice));
            }
            length = strlen (str_nicks_count);
            snprintf (str_nicks_count + length,
                      sizeof (str_nicks_count) - length,
                      "%s%s%d%s %s",
                      (str_nicks_count[0]) ? ", " : "",
                      IRC_COLOR_CHAT_CHANNEL,
                      num_normal,
                      IRC_COLOR_RESET,
                      NG_("normal", "normals", num_normal));
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, "names", ptr_channel->buffer),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                _("%sChannel %s%s%s: %s%d%s %s %s(%s%s)"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_CHANNEL,
                ptr_channel->name,
                IRC_COLOR_RESET,
                IRC_COLOR_CHAT_CHANNEL,
                num_nicks,
                IRC_COLOR_RESET,
                NG_("nick", "nicks", num_nicks),
                IRC_COLOR_CHAT_DELIMITERS,
                str_nicks_count,
                IRC_COLOR_CHAT_DELIMITERS);
        }
        /* on the first end-of-names after join: ask channel modes and WHOX */
        if (!weechat_hashtable_has_key (ptr_channel->join_msg_received, command))
        {
            irc_command_mode_server (server, "MODE", ptr_channel, NULL,
                                     IRC_SERVER_SEND_OUTQ_PRIO_LOW);
            irc_channel_check_whox (server, ptr_channel);
        }
    }
    else
    {
        /* channel not joined: display the raw message on the server buffer */
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, "names", NULL),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s%s%s: %s",
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_CHANNEL,
            argv[3],
            IRC_COLOR_RESET,
            (argv[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4]);
    }
    if (ptr_channel)
    {
        weechat_hashtable_set (ptr_channel->join_msg_received, "353", "1");
        weechat_hashtable_set (ptr_channel->join_msg_received, "366", "1");
    }
    weechat_bar_item_update ("input_prompt");
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "367": banlist.
*
* Message looks like:
* :server 367 mynick #channel banmask nick!user@host 1205590879
*/
IRC_PROTOCOL_CALLBACK(367)
{
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;
    struct t_irc_modelist *ptr_modelist;
    time_t datetime;
    const char *nick_address;
    char str_number[64];
    IRC_PROTOCOL_MIN_ARGS(5);
    /* display on the channel buffer if joined, else on the server buffer */
    ptr_channel = irc_channel_search (server, argv[3]);
    ptr_buffer = (ptr_channel && ptr_channel->nicks) ?
        ptr_channel->buffer : server->buffer;
    /*
     * NOTE(review): ptr_channel may be NULL here (channel not found);
     * assumes irc_modelist_search() copes with a NULL channel — confirm
     */
    ptr_modelist = irc_modelist_search (ptr_channel, 'b');
    if (ptr_modelist)
    {
        /* start receiving new list */
        if (ptr_modelist->state != IRC_MODELIST_STATE_RECEIVING)
        {
            irc_modelist_item_free_all (ptr_modelist);
            ptr_modelist->state = IRC_MODELIST_STATE_RECEIVING;
        }
        /* "[N] " prefix: 1-based position of this entry in the ban list */
        snprintf (str_number, sizeof (str_number),
                  "%s[%s%d%s] ",
                  IRC_COLOR_CHAT_DELIMITERS,
                  IRC_COLOR_RESET,
                  ((ptr_modelist->last_item) ? ptr_modelist->last_item->number + 1 : 0) + 1,
                  IRC_COLOR_CHAT_DELIMITERS);
    }
    else
        str_number[0] = '\0';
    /* argv[5] (optional): nick!user@host of who set the ban */
    if (argc >= 6)
    {
        nick_address = irc_protocol_nick_address (
            server, 1, NULL, irc_message_get_nick_from_host (argv[5]),
            irc_message_get_address_from_host (argv[5]));
        /* argv[6] (optional): timestamp of the ban */
        if (argc >= 7)
        {
            datetime = (time_t)(atol ((argv[6][0] == ':') ? argv[6] + 1 : argv[6]));
            if (ptr_modelist)
                irc_modelist_item_new (ptr_modelist, argv[4], argv[5], datetime);
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, "banlist", ptr_buffer),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                /* TRANSLATORS: "%s" after "on" is a date */
                _("%s%s[%s%s%s] %s%s%s%s banned by %s on %s"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_CHAT_CHANNEL,
                argv[3],
                IRC_COLOR_CHAT_DELIMITERS,
                str_number,
                IRC_COLOR_CHAT_HOST,
                argv[4],
                IRC_COLOR_RESET,
                (nick_address[0]) ? nick_address : "?",
                weechat_util_get_time_string (&datetime));
        }
        else
        {
            /* setter known, no timestamp */
            if (ptr_modelist)
                irc_modelist_item_new (ptr_modelist, argv[4], argv[5], 0);
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, "banlist", ptr_buffer),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                _("%s%s[%s%s%s] %s%s%s%s banned by %s"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_CHAT_CHANNEL,
                argv[3],
                IRC_COLOR_CHAT_DELIMITERS,
                str_number,
                IRC_COLOR_CHAT_HOST,
                argv[4],
                IRC_COLOR_RESET,
                (nick_address[0]) ? nick_address : "?");
        }
    }
    else
    {
        /* only the ban mask is known */
        if (ptr_modelist)
            irc_modelist_item_new (ptr_modelist, argv[4], NULL, 0);
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, "banlist", ptr_buffer),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            _("%s%s[%s%s%s] %s%s%s%s banned"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_CHAT_CHANNEL,
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            str_number,
            IRC_COLOR_CHAT_HOST,
            argv[4],
            IRC_COLOR_RESET);
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "368": end of banlist.
*
* Message looks like:
* :server 368 mynick #channel :End of Channel Ban List
*/
IRC_PROTOCOL_CALLBACK(368)
{
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;
    struct t_irc_modelist *ptr_modelist;
    char *ptr_text;

    IRC_PROTOCOL_MIN_ARGS(4);

    /* optional trailing text, e.g. "End of Channel Ban List" */
    ptr_text = NULL;
    if (argc > 4)
    {
        ptr_text = argv_eol[4];
        if (ptr_text[0] == ':')
            ptr_text++;
    }

    /* display on the channel buffer if joined, else on the server buffer */
    ptr_channel = irc_channel_search (server, argv[3]);
    ptr_buffer = (ptr_channel && ptr_channel->nicks) ?
        ptr_channel->buffer : server->buffer;

    ptr_modelist = irc_modelist_search (ptr_channel, 'b');
    if (ptr_modelist)
    {
        if (ptr_modelist->state != IRC_MODELIST_STATE_RECEIVING)
        {
            /* no ban was received before this end message: clear the list */
            irc_modelist_item_free_all (ptr_modelist);
        }
        ptr_modelist->state = IRC_MODELIST_STATE_RECEIVED;
    }

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command, "banlist", ptr_buffer),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s[%s%s%s]%s%s%s",
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_CHAT_CHANNEL,
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        (ptr_text) ? " " : "",
        (ptr_text) ? ptr_text : "");

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "432": erroneous nickname.
*
* Message looks like:
* :server 432 * mynick :Erroneous Nickname
*/
IRC_PROTOCOL_CALLBACK(432)
{
    const char *new_nick;
    struct t_gui_buffer *ptr_buffer;

    /* always display the error itself */
    irc_protocol_cb_generic_error (server,
                                   date, nick, address, host, command,
                                   ignored, argc, argv, argv_eol);

    /* once connected, nothing else to do */
    if (server->is_connected)
        return WEECHAT_RC_OK;

    ptr_buffer = irc_msgbuffer_get_target_buffer (server, NULL,
                                                  command, NULL, NULL);
    new_nick = irc_server_get_alternate_nick (server);
    if (!new_nick)
    {
        /* no nickname left to try: give up on this connection */
        weechat_printf_date_tags (
            ptr_buffer, date, NULL,
            _("%s%s: all declared nicknames are already in use or "
              "invalid, closing connection with server"),
            weechat_prefix ("error"), IRC_PLUGIN_NAME);
        irc_server_disconnect (server, 0, 1);
        return WEECHAT_RC_OK;
    }

    /* retry with the next declared nickname */
    weechat_printf_date_tags (
        ptr_buffer, date, NULL,
        _("%s%s: nickname \"%s\" is invalid, trying nickname \"%s\""),
        weechat_prefix ("error"), IRC_PLUGIN_NAME,
        server->nick, new_nick);
    irc_server_set_nick (server, new_nick);
    irc_server_sendf (
        server, 0, NULL,
        "NICK %s%s",
        (server->nick && strchr (server->nick, ':')) ? ":" : "",
        server->nick);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "433": nickname already in use.
*
* Message looks like:
* :server 433 * mynick :Nickname is already in use.
*/
IRC_PROTOCOL_CALLBACK(433)
{
    const char *new_nick;
    struct t_gui_buffer *ptr_buffer;

    /* when already connected, just display the error message */
    if (server->is_connected)
    {
        return irc_protocol_cb_generic_error (server,
                                              date, nick, address, host,
                                              command, ignored, argc, argv,
                                              argv_eol);
    }

    ptr_buffer = irc_msgbuffer_get_target_buffer (server, NULL,
                                                  command, NULL, NULL);
    new_nick = irc_server_get_alternate_nick (server);
    if (!new_nick)
    {
        /* no nickname left to try: give up on this connection */
        weechat_printf_date_tags (
            ptr_buffer, date, NULL,
            _("%s%s: all declared nicknames are already in use, closing "
              "connection with server"),
            weechat_prefix ("error"), IRC_PLUGIN_NAME);
        irc_server_disconnect (server, 0, 1);
        return WEECHAT_RC_OK;
    }

    /* retry with the next declared nickname */
    weechat_printf_date_tags (
        ptr_buffer, date, NULL,
        _("%s%s: nickname \"%s\" is already in use, trying nickname "
          "\"%s\""),
        weechat_prefix ("network"), IRC_PLUGIN_NAME,
        server->nick, new_nick);
    irc_server_set_nick (server, new_nick);
    irc_server_sendf (
        server, 0, NULL,
        "NICK %s%s",
        (server->nick && strchr (server->nick, ':')) ? ":" : "",
        server->nick);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "437": nick/channel temporarily unavailable.
*
* Message looks like:
* :server 437 * mynick :Nick/channel is temporarily unavailable
*/
IRC_PROTOCOL_CALLBACK(437)
{
    const char *new_nick;
    struct t_gui_buffer *ptr_buffer;

    /* always display the error itself */
    irc_protocol_cb_generic_error (server,
                                   date, nick, address, host, command,
                                   ignored, argc, argv, argv_eol);

    /* react only while connecting */
    if (server->is_connected)
        return WEECHAT_RC_OK;

    /* react only if the message is about our own nick */
    if ((argc < 4)
        || (irc_server_strcasecmp (server, server->nick, argv[3]) != 0))
    {
        return WEECHAT_RC_OK;
    }

    ptr_buffer = irc_msgbuffer_get_target_buffer (server, NULL,
                                                  command, NULL, NULL);
    new_nick = irc_server_get_alternate_nick (server);
    if (!new_nick)
    {
        /* no nickname left to try: give up on this connection */
        weechat_printf_date_tags (
            ptr_buffer, date, NULL,
            _("%s%s: all declared nicknames are already in use or "
              "invalid, closing connection with server"),
            weechat_prefix ("error"), IRC_PLUGIN_NAME);
        irc_server_disconnect (server, 0, 1);
        return WEECHAT_RC_OK;
    }

    /* retry with the next declared nickname */
    weechat_printf_date_tags (
        ptr_buffer, date, NULL,
        _("%s%s: nickname \"%s\" is unavailable, trying nickname "
          "\"%s\""),
        weechat_prefix ("error"), IRC_PLUGIN_NAME,
        server->nick, new_nick);
    irc_server_set_nick (server, new_nick);
    irc_server_sendf (
        server, 0, NULL,
        "NICK %s%s",
        (server->nick && strchr (server->nick, ':')) ? ":" : "",
        server->nick);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "438": not authorized to change nickname.
*
* Message looks like:
* :server 438 mynick newnick :Nick change too fast. Please wait 30 seconds.
*/
IRC_PROTOCOL_CALLBACK(438)
{
    struct t_gui_buffer *ptr_buffer;

    IRC_PROTOCOL_MIN_ARGS(4);

    ptr_buffer = irc_msgbuffer_get_target_buffer (server, NULL,
                                                  command, NULL, NULL);
    if (argc < 5)
    {
        /* no reason given by the server: display target and nick only */
        weechat_printf_date_tags (
            ptr_buffer,
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            "%s%s %s",
            weechat_prefix ("network"),
            argv[2],
            argv[3]);
        return WEECHAT_RC_OK;
    }

    /* display server reason with "old => new" nicks */
    weechat_printf_date_tags (
        ptr_buffer,
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s (%s => %s)",
        weechat_prefix ("network"),
        (argv_eol[4][0] == ':') ? argv_eol[4] + 1 : argv_eol[4],
        argv[2],
        argv[3]);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "470": forwarding to another channel.
*
* Message looks like:
* :server 470 mynick #channel ##channel :Forwarding to another channel
*/
IRC_PROTOCOL_CALLBACK(470)
{
    struct t_gui_buffer *ptr_buffer;
    struct t_gui_lines *own_lines;
    const char *buffer_name, *short_name, *localvar_channel;
    char *old_channel_lower, *new_channel_lower;
    int lines_count;
    /* display the forward message itself */
    irc_protocol_cb_generic_error (server,
                                   date, nick, address, host, command,
                                   ignored, argc, argv, argv_eol);
    /* rename the buffer only if the old channel is not currently joined */
    if ((argc >= 5) && !irc_channel_search (server, argv[3]))
    {
        ptr_buffer = irc_channel_search_buffer (server,
                                                IRC_CHANNEL_TYPE_CHANNEL,
                                                argv[3]);
        if (ptr_buffer)
        {
            short_name = weechat_buffer_get_string (ptr_buffer, "short_name");
            localvar_channel = weechat_buffer_get_string (ptr_buffer,
                                                          "localvar_channel");
            if (!short_name
                || (localvar_channel
                    && (strcmp (localvar_channel, short_name) == 0)))
            {
                /*
                 * update the short_name only if it was not changed by the
                 * user
                 */
                weechat_buffer_set (ptr_buffer, "short_name", argv[4]);
            }
            /* point the buffer name/localvar at the new channel (argv[4]) */
            buffer_name = irc_buffer_build_name (server->name, argv[4]);
            weechat_buffer_set (ptr_buffer, "name", buffer_name);
            weechat_buffer_set (ptr_buffer, "localvar_set_channel", argv[4]);
            /*
             * check if logger backlog should be displayed for the new channel
             * name: it is displayed only if the buffer is currently completely
             * empty (no messages at all)
             */
            lines_count = 0;
            own_lines = weechat_hdata_pointer (weechat_hdata_get ("buffer"),
                                               ptr_buffer, "own_lines");
            if (own_lines)
            {
                lines_count = weechat_hdata_integer (
                    weechat_hdata_get ("lines"),
                    own_lines, "lines_count");
            }
            if (lines_count == 0)
            {
                (void) weechat_hook_signal_send ("logger_backlog",
                                                 WEECHAT_HOOK_SIGNAL_POINTER,
                                                 ptr_buffer);
            }
        }
        /*
         * migrate join flags keyed by the old channel name to the new one
         * (keys are stored lowercase; assumes weechat_string_tolower
         * modifies the string in place — TODO confirm API version)
         */
        old_channel_lower = strdup (argv[3]);
        if (old_channel_lower)
        {
            weechat_string_tolower (old_channel_lower);
            new_channel_lower = strdup (argv[4]);
            if (new_channel_lower)
            {
                weechat_string_tolower (new_channel_lower);
                if (weechat_hashtable_has_key (server->join_manual,
                                               old_channel_lower))
                {
                    weechat_hashtable_set (server->join_manual,
                                           new_channel_lower,
                                           weechat_hashtable_get (
                                               server->join_manual,
                                               old_channel_lower));
                    weechat_hashtable_remove (server->join_manual,
                                              old_channel_lower);
                }
                if (weechat_hashtable_has_key (server->join_noswitch,
                                               old_channel_lower))
                {
                    weechat_hashtable_set (server->join_noswitch,
                                           new_channel_lower,
                                           weechat_hashtable_get (
                                               server->join_noswitch,
                                               old_channel_lower));
                    weechat_hashtable_remove (server->join_noswitch,
                                              old_channel_lower);
                }
                free (new_channel_lower);
            }
            free (old_channel_lower);
        }
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "728": quietlist.
*
* Message looks like:
* :server 728 mynick #channel mode quietmask nick!user@host 1351350090
*/
IRC_PROTOCOL_CALLBACK(728)
{
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;
    struct t_irc_modelist *ptr_modelist;
    time_t datetime;
    const char *nick_address;
    char str_number[64];
    IRC_PROTOCOL_MIN_ARGS(6);
    /* display on the channel buffer if joined, else on the server buffer */
    ptr_channel = irc_channel_search (server, argv[3]);
    ptr_buffer = (ptr_channel && ptr_channel->nicks) ?
        ptr_channel->buffer : server->buffer;
    /*
     * argv[4] is the mode character (usually 'q');
     * NOTE(review): ptr_channel may be NULL here (channel not found);
     * assumes irc_modelist_search() copes with a NULL channel — confirm
     */
    ptr_modelist = irc_modelist_search (ptr_channel, argv[4][0]);
    if (ptr_modelist)
    {
        /* start receiving new list */
        if (ptr_modelist->state != IRC_MODELIST_STATE_RECEIVING)
        {
            irc_modelist_item_free_all (ptr_modelist);
            ptr_modelist->state = IRC_MODELIST_STATE_RECEIVING;
        }
        /* "[N] " prefix: 1-based position of this entry in the quiet list */
        snprintf (str_number, sizeof (str_number),
                  "%s[%s%d%s] ",
                  IRC_COLOR_CHAT_DELIMITERS,
                  IRC_COLOR_RESET,
                  ((ptr_modelist->last_item) ? ptr_modelist->last_item->number + 1 : 0) + 1,
                  IRC_COLOR_CHAT_DELIMITERS);
    }
    else
        str_number[0] = '\0';
    /* argv[6] (optional): nick!user@host of who set the quiet */
    if (argc >= 7)
    {
        nick_address = irc_protocol_nick_address (
            server, 1, NULL, irc_message_get_nick_from_host (argv[6]),
            irc_message_get_address_from_host (argv[6]));
        /* argv[7] (optional): timestamp of the quiet */
        if (argc >= 8)
        {
            datetime = (time_t)(atol ((argv[7][0] == ':') ? argv[7] + 1 : argv[7]));
            if (ptr_modelist)
                irc_modelist_item_new (ptr_modelist, argv[5], argv[6], datetime);
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, "quietlist", ptr_buffer),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                /* TRANSLATORS: "%s" after "on" is a date */
                _("%s%s[%s%s%s] %s%s%s%s quieted by %s on %s"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_CHAT_CHANNEL,
                argv[3],
                IRC_COLOR_CHAT_DELIMITERS,
                str_number,
                IRC_COLOR_CHAT_HOST,
                argv[5],
                IRC_COLOR_RESET,
                (nick_address[0]) ? nick_address : "?",
                weechat_util_get_time_string (&datetime));
        }
        else
        {
            /* setter known, no timestamp */
            if (ptr_modelist)
                irc_modelist_item_new (ptr_modelist, argv[5], argv[6], 0);
            weechat_printf_date_tags (
                irc_msgbuffer_get_target_buffer (
                    server, NULL, command, "quietlist", ptr_buffer),
                date,
                irc_protocol_tags (command, "irc_numeric", NULL, NULL),
                _("%s%s[%s%s%s] %s%s%s%s quieted by %s"),
                weechat_prefix ("network"),
                IRC_COLOR_CHAT_DELIMITERS,
                IRC_COLOR_CHAT_CHANNEL,
                argv[3],
                IRC_COLOR_CHAT_DELIMITERS,
                str_number,
                IRC_COLOR_CHAT_HOST,
                argv[5],
                IRC_COLOR_RESET,
                (nick_address[0]) ? nick_address : "?");
        }
    }
    else
    {
        /* only the quiet mask is known */
        if (ptr_modelist)
            irc_modelist_item_new (ptr_modelist, argv[5], NULL, 0);
        weechat_printf_date_tags (
            irc_msgbuffer_get_target_buffer (
                server, NULL, command, "quietlist", ptr_buffer),
            date,
            irc_protocol_tags (command, "irc_numeric", NULL, NULL),
            _("%s%s[%s%s%s] %s%s%s%s quieted"),
            weechat_prefix ("network"),
            IRC_COLOR_CHAT_DELIMITERS,
            IRC_COLOR_CHAT_CHANNEL,
            argv[3],
            IRC_COLOR_CHAT_DELIMITERS,
            str_number,
            IRC_COLOR_CHAT_HOST,
            argv[5],
            IRC_COLOR_RESET);
    }
    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "729": end of quietlist.
*
* Message looks like:
* :server 729 mynick #channel mode :End of Channel Quiet List
*/
IRC_PROTOCOL_CALLBACK(729)
{
    struct t_irc_channel *ptr_channel;
    struct t_gui_buffer *ptr_buffer;
    struct t_irc_modelist *ptr_modelist;
    char *ptr_text;

    IRC_PROTOCOL_MIN_ARGS(5);

    /* optional trailing text, e.g. "End of Channel Quiet List" */
    ptr_text = NULL;
    if (argc > 5)
    {
        ptr_text = argv_eol[5];
        if (ptr_text[0] == ':')
            ptr_text++;
    }

    /* display on the channel buffer if joined, else on the server buffer */
    ptr_channel = irc_channel_search (server, argv[3]);
    ptr_buffer = (ptr_channel && ptr_channel->nicks) ?
        ptr_channel->buffer : server->buffer;

    ptr_modelist = irc_modelist_search (ptr_channel, argv[4][0]);
    if (ptr_modelist)
    {
        if (ptr_modelist->state != IRC_MODELIST_STATE_RECEIVING)
        {
            /* no quiet was received before this end message: clear the list */
            irc_modelist_item_free_all (ptr_modelist);
        }
        ptr_modelist->state = IRC_MODELIST_STATE_RECEIVED;
    }

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command, "quietlist", ptr_buffer),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s[%s%s%s]%s%s%s",
        weechat_prefix ("network"),
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_CHAT_CHANNEL,
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_RESET,
        (ptr_text) ? " " : "",
        (ptr_text) ? ptr_text : "");

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "730": monitored nicks are online
* (RPL_MONONLINE).
*
* Message looks like:
* :server 730 mynick :nick1!user1@host1,nick2!user2@host2
*/
IRC_PROTOCOL_CALLBACK(730)
{
    struct t_irc_notify *ptr_notify;
    const char *ptr_nick, *ptr_host;
    char **masks;
    int i, num_masks;

    IRC_PROTOCOL_MIN_ARGS(4);

    /* trailing parameter is a comma-separated list of nick!user@host */
    masks = weechat_string_split ((argv_eol[3][0] == ':') ?
                                  argv_eol[3] + 1 : argv_eol[3],
                                  ",",
                                  NULL,
                                  WEECHAT_STRING_SPLIT_STRIP_LEFT
                                  | WEECHAT_STRING_SPLIT_STRIP_RIGHT
                                  | WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
                                  0,
                                  &num_masks);
    if (!masks)
        return WEECHAT_RC_OK;

    /* mark each monitored nick as online */
    for (i = 0; i < num_masks; i++)
    {
        ptr_nick = irc_message_get_nick_from_host (masks[i]);
        ptr_host = strchr (masks[i], '!');
        if (ptr_host)
            ptr_host++;
        ptr_notify = irc_notify_search (server, ptr_nick);
        if (ptr_notify)
            irc_notify_set_is_on_server (ptr_notify, ptr_host, 1);
    }
    weechat_string_free_split (masks);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "731": monitored nicks are offline
* (RPL_MONOFFLINE).
*
* Message looks like:
* :server 731 mynick :nick1!user1@host1,nick2!user2@host2
*/
IRC_PROTOCOL_CALLBACK(731)
{
    struct t_irc_notify *ptr_notify;
    const char *ptr_nick, *ptr_host;
    char **masks;
    int i, num_masks;

    IRC_PROTOCOL_MIN_ARGS(4);

    /* trailing parameter is a comma-separated list of nick!user@host */
    masks = weechat_string_split ((argv_eol[3][0] == ':') ?
                                  argv_eol[3] + 1 : argv_eol[3],
                                  ",",
                                  NULL,
                                  WEECHAT_STRING_SPLIT_STRIP_LEFT
                                  | WEECHAT_STRING_SPLIT_STRIP_RIGHT
                                  | WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
                                  0,
                                  &num_masks);
    if (!masks)
        return WEECHAT_RC_OK;

    /* mark each monitored nick as offline */
    for (i = 0; i < num_masks; i++)
    {
        ptr_nick = irc_message_get_nick_from_host (masks[i]);
        ptr_host = strchr (masks[i], '!');
        if (ptr_host)
            ptr_host++;
        ptr_notify = irc_notify_search (server, ptr_nick);
        if (ptr_notify)
            irc_notify_set_is_on_server (ptr_notify, ptr_host, 0);
    }
    weechat_string_free_split (masks);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "732": list of monitored nicks (RPL_MONLIST).
*
* Message looks like:
* :server 732 mynick :nick1!user1@host1,nick2!user2@host2
*/
IRC_PROTOCOL_CALLBACK(732)
{
    char *ptr_text;

    IRC_PROTOCOL_MIN_ARGS(3);

    /* optional trailing text: the list of monitored nicks */
    ptr_text = NULL;
    if (argc > 3)
    {
        ptr_text = argv_eol[3];
        if (ptr_text[0] == ':')
            ptr_text++;
    }

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command, "monitor", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s",
        weechat_prefix ("network"),
        (ptr_text && ptr_text[0]) ? ptr_text : "");

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "733": end of a monitor list (RPL_ENDOFMONLIST).
*
* Message looks like:
* :server 733 mynick :End of MONITOR list
*/
IRC_PROTOCOL_CALLBACK(733)
{
    char *ptr_text;

    IRC_PROTOCOL_MIN_ARGS(3);

    /* optional trailing text, e.g. "End of MONITOR list" */
    ptr_text = NULL;
    if (argc > 3)
    {
        ptr_text = argv_eol[3];
        if (ptr_text[0] == ':')
            ptr_text++;
    }

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command, "monitor", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s",
        weechat_prefix ("network"),
        (ptr_text && ptr_text[0]) ? ptr_text : "");

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "734": monitor list is full (ERR_MONLISTFULL)
*
* Message looks like:
* :server 734 mynick limit nick1,nick2 :Monitor list is full.
*/
IRC_PROTOCOL_CALLBACK(734)
{
    char *ptr_text;

    IRC_PROTOCOL_MIN_ARGS(5);

    /* optional trailing text, e.g. "Monitor list is full." */
    ptr_text = NULL;
    if (argc > 5)
    {
        ptr_text = argv_eol[5];
        if (ptr_text[0] == ':')
            ptr_text++;
    }

    /* argv[3] is the monitor list size limit */
    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (
            server, NULL, command, "monitor", NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s (%s)",
        weechat_prefix ("error"),
        (ptr_text && ptr_text[0]) ? ptr_text : "",
        argv[3]);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "900": logged in as (SASL).
*
* Message looks like:
* :server 900 mynick nick!user@host mynick :You are now logged in as mynick
*/
IRC_PROTOCOL_CALLBACK(900)
{
    const char *ptr_msg;

    IRC_PROTOCOL_MIN_ARGS(6);

    /* trailing message, e.g. "You are now logged in as mynick" */
    ptr_msg = (argv_eol[5][0] == ':') ? argv_eol[5] + 1 : argv_eol[5];

    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, argv[3], command, NULL, NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s %s(%s%s%s)",
        weechat_prefix ("network"),
        ptr_msg,
        IRC_COLOR_CHAT_DELIMITERS,
        IRC_COLOR_CHAT_HOST,
        argv[3],
        IRC_COLOR_CHAT_DELIMITERS);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC message "901": you are now logged in.
*
* Message looks like:
* :server 901 mynick nick user host :You are now logged in. (id nick, username user, hostname host)
*/
IRC_PROTOCOL_CALLBACK(901)
{
    IRC_PROTOCOL_MIN_ARGS(6);

    if (argc < 7)
    {
        /* no trailing message: fall back to generic numeric display */
        irc_protocol_cb_numeric (server,
                                 date, nick, address, host, command,
                                 ignored, argc, argv, argv_eol);
        return WEECHAT_RC_OK;
    }

    /* display only the trailing message */
    weechat_printf_date_tags (
        irc_msgbuffer_get_target_buffer (server, NULL, command, NULL, NULL),
        date,
        irc_protocol_tags (command, "irc_numeric", NULL, NULL),
        "%s%s",
        weechat_prefix ("network"),
        (argv_eol[6][0] == ':') ? argv_eol[6] + 1 : argv_eol[6]);

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC messages "903" and "907" (SASL OK).
*
* Messages look like:
 * :server 903 nick :SASL authentication successful
 * :server 907 nick :You have already authenticated using SASL
*/
IRC_PROTOCOL_CALLBACK(sasl_end_ok)
{
    /* display the numeric message */
    irc_protocol_cb_numeric (server, date, nick, address, host, command,
                             ignored, argc, argv, argv_eol);

    /* end capability negotiation if the connection is still in progress */
    if (!server->is_connected)
        irc_server_sendf (server, 0, NULL, "CAP END");

    return WEECHAT_RC_OK;
}
/*
* Callback for the IRC messages "902", "904", "905", "906" (SASL failed).
*
* Messages look like:
* :server 904 nick :SASL authentication failed
*/
IRC_PROTOCOL_CALLBACK(sasl_end_fail)
{
    int sasl_fail;

    /* display the failure message */
    irc_protocol_cb_numeric (server, date, nick, address, host, command,
                             ignored, argc, argv, argv_eol);

    /* apply the configured "sasl_fail" action */
    sasl_fail = IRC_SERVER_OPTION_INTEGER(server, IRC_SERVER_OPTION_SASL_FAIL);
    switch (sasl_fail)
    {
        case IRC_SERVER_SASL_FAIL_RECONNECT:
            /* disconnect with automatic reconnection */
            irc_server_disconnect (server, 0, 1);
            return WEECHAT_RC_OK;
        case IRC_SERVER_SASL_FAIL_DISCONNECT:
            /* disconnect without reconnection */
            irc_server_disconnect (server, 0, 0);
            return WEECHAT_RC_OK;
        default:
            break;
    }

    /* otherwise continue connecting without SASL */
    if (!server->is_connected)
        irc_server_sendf (server, 0, NULL, "CAP END");

    return WEECHAT_RC_OK;
}
/*
* Returns hashtable with tags for an IRC message.
*
* Example:
* if tags == "aaa=bbb;ccc;example.com/ddd=eee",
* hashtable will have following keys/values:
* "aaa" => "bbb"
* "ccc" => NULL
* "example.com/ddd" => "eee"
*/
struct t_hashtable *
irc_protocol_get_message_tags (const char *tags)
{
    struct t_hashtable *hashtable;
    char **tags_array, *pos_equal, *tag_name;
    int tags_count, i;

    if (!tags || !tags[0])
        return NULL;

    hashtable = weechat_hashtable_new (32,
                                       WEECHAT_HASHTABLE_STRING,
                                       WEECHAT_HASHTABLE_STRING,
                                       NULL, NULL);
    if (!hashtable)
        return NULL;

    tags_array = weechat_string_split (tags, ";", NULL,
                                       WEECHAT_STRING_SPLIT_STRIP_LEFT
                                       | WEECHAT_STRING_SPLIT_STRIP_RIGHT
                                       | WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
                                       0, &tags_count);
    if (!tags_array)
        return hashtable;

    for (i = 0; i < tags_count; i++)
    {
        pos_equal = strchr (tags_array[i], '=');
        if (!pos_equal)
        {
            /* format: "tag" (no value) */
            weechat_hashtable_set (hashtable, tags_array[i], NULL);
            continue;
        }
        /* format: "tag=value" */
        tag_name = weechat_strndup (tags_array[i], pos_equal - tags_array[i]);
        if (tag_name)
        {
            weechat_hashtable_set (hashtable, tag_name, pos_equal + 1);
            free (tag_name);
        }
    }
    weechat_string_free_split (tags_array);

    return hashtable;
}
/*
* Parses date/time received in a "time" tag.
*
* Returns value of time (timestamp), 0 if error.
*/
time_t
irc_protocol_parse_time (const char *time)
{
    time_t time_value, time_msg, time_gm, time_local;
    struct tm tm_date, tm_date_gm, tm_date_local;
    long value;
    char *time2, *pos, *error;

    if (!time || !time[0])
        return 0;

    time_value = 0;

    if (strchr (time, '-'))
    {
        /* date is with ISO 8601 format: "2012-11-24T07:41:02.018Z" */
        /* initialize structure, because strptime does not do it */
        memset (&tm_date, 0, sizeof (struct tm));
        if (strptime (time, "%Y-%m-%dT%H:%M:%S", &tm_date))
        {
            if (tm_date.tm_year > 0)
            {
                /*
                 * the parsed time is UTC but mktime() interprets the struct
                 * as local time: compute the GMT/local offset and apply it
                 * to recover the real timestamp
                 */
                time_msg = mktime (&tm_date);
                gmtime_r (&time_msg, &tm_date_gm);
                localtime_r (&time_msg, &tm_date_local);
                time_gm = mktime (&tm_date_gm);
                time_local = mktime (&tm_date_local);
                time_value = mktime (&tm_date_local) + (time_local - time_gm);
            }
        }
    }
    else
    {
        /* date is with timestamp format: "1353403519.478" */
        time2 = strdup (time);
        if (time2)
        {
            /* drop the fractional part (after '.' or ',') */
            pos = strchr (time2, '.');
            if (pos)
                pos[0] = '\0';
            pos = strchr (time2, ',');
            if (pos)
                pos[0] = '\0';
            value = strtol (time2, &error, 10);
            if (error && !error[0] && (value >= 0))
            {
                /*
                 * cast to time_t, not int: an int cast truncated
                 * timestamps larger than INT_MAX (year-2038 problem
                 * on platforms with 64-bit time_t)
                 */
                time_value = (time_t)value;
            }
            free (time2);
        }
    }

    return time_value;
}
/*
 * Executes action when an IRC message is received.
 *
 * NOTE(review): the original comment claimed "irc_message" is the message
 * without optional tags, but the code below parses IRCv3 tags ("@...")
 * directly from "irc_message" — so the tags are expected to still be
 * present here. Verify against callers.
 *
 * Flow: parse tags (the "time" tag gives the message date), extract
 * nick/address/host from the prefix, check ignores, emit "irc_raw_in"
 * (always) and "irc_in" (if not ignored) signals, dispatch to the command
 * callback (static table below; unknown numeric commands fall back to a
 * generic handler), then emit "irc_in2"/"irc_raw_in2" signals.
 */

void
irc_protocol_recv_command (struct t_irc_server *server,
                           const char *irc_message,
                           const char *msg_command,
                           const char *msg_channel)
{
    int i, cmd_found, return_code, argc, decode_color, keep_trailing_spaces;
    int message_ignored, flags;
    char *message_colors_decoded, *pos_space, *tags;
    struct t_irc_channel *ptr_channel;
    t_irc_recv_func *cmd_recv_func;
    const char *cmd_name, *ptr_msg_after_tags;
    time_t date;
    const char *nick1, *address1, *host1;
    char *nick, *address, *address_color, *host, *host_no_color, *host_color;
    char **argv, **argv_eol;
    struct t_hashtable *hash_tags;
    /*
     * dispatch table: name, decode_color flag, keep_trailing_spaces flag,
     * callback; terminated by a NULL entry
     */
    struct t_irc_protocol_msg irc_protocol_messages[] =
        { { "account", /* account (cap account-notify) */ 1, 0, &irc_protocol_cb_account },
          { "authenticate", /* authenticate */ 1, 0, &irc_protocol_cb_authenticate },
          { "away", /* away (cap away-notify) */ 1, 0, &irc_protocol_cb_away },
          { "cap", /* client capability */ 1, 0, &irc_protocol_cb_cap },
          { "chghost", /* user/host change (cap chghost) */ 1, 0, &irc_protocol_cb_chghost },
          { "error", /* error received from IRC server */ 1, 0, &irc_protocol_cb_error },
          { "invite", /* invite a nick on a channel */ 1, 0, &irc_protocol_cb_invite },
          { "join", /* join a channel */ 1, 0, &irc_protocol_cb_join },
          { "kick", /* forcibly remove a user from a channel */ 1, 1, &irc_protocol_cb_kick },
          { "kill", /* close client-server connection */ 1, 1, &irc_protocol_cb_kill },
          { "mode", /* change channel or user mode */ 1, 0, &irc_protocol_cb_mode },
          { "nick", /* change current nickname */ 1, 0, &irc_protocol_cb_nick },
          { "notice", /* send notice message to user */ 1, 1, &irc_protocol_cb_notice },
          { "part", /* leave a channel */ 1, 1, &irc_protocol_cb_part },
          { "ping", /* ping server */ 1, 0, &irc_protocol_cb_ping },
          { "pong", /* answer to a ping message */ 1, 0, &irc_protocol_cb_pong },
          { "privmsg", /* message received */ 1, 1, &irc_protocol_cb_privmsg },
          { "quit", /* close all connections and quit */ 1, 1, &irc_protocol_cb_quit },
          { "topic", /* get/set channel topic */ 0, 1, &irc_protocol_cb_topic },
          { "wallops", /* message to all users with 'w' user mode set */ 1, 1, &irc_protocol_cb_wallops },
          { "001", /* a server message */ 1, 0, &irc_protocol_cb_001 },
          { "005", /* a server message */ 1, 0, &irc_protocol_cb_005 },
          { "008", /* server notice mask */ 1, 0, &irc_protocol_cb_008 },
          { "221", /* user mode string */ 1, 0, &irc_protocol_cb_221 },
          { "223", /* whois (charset is) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "264", /* whois (is using encrypted connection) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "275", /* whois (secure connection) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "276", /* whois (has client certificate fingerprint) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "301", /* away message */ 1, 1, &irc_protocol_cb_301 },
          { "303", /* ison */ 1, 0, &irc_protocol_cb_303 },
          { "305", /* unaway */ 1, 0, &irc_protocol_cb_305 },
          { "306", /* now away */ 1, 0, &irc_protocol_cb_306 },
          { "307", /* whois (registered nick) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "310", /* whois (help mode) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "311", /* whois (user) */ 1, 0, &irc_protocol_cb_311 },
          { "312", /* whois (server) */ 1, 0, &irc_protocol_cb_312 },
          { "313", /* whois (operator) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "314", /* whowas */ 1, 0, &irc_protocol_cb_314 },
          { "315", /* end of /who list */ 1, 0, &irc_protocol_cb_315 },
          { "317", /* whois (idle) */ 1, 0, &irc_protocol_cb_317 },
          { "318", /* whois (end) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "319", /* whois (channels) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "320", /* whois (identified user) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "321", /* /list start */ 1, 0, &irc_protocol_cb_321 },
          { "322", /* channel (for /list) */ 1, 0, &irc_protocol_cb_322 },
          { "323", /* end of /list */ 1, 0, &irc_protocol_cb_323 },
          { "324", /* channel mode */ 1, 0, &irc_protocol_cb_324 },
          { "326", /* whois (has oper privs) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "327", /* whois (host) */ 1, 0, &irc_protocol_cb_327 },
          { "328", /* channel url */ 1, 0, &irc_protocol_cb_328 },
          { "329", /* channel creation date */ 1, 0, &irc_protocol_cb_329 },
          { "330", /* is logged in as */ 1, 0, &irc_protocol_cb_330_343 },
          { "331", /* no topic for channel */ 1, 0, &irc_protocol_cb_331 },
          { "332", /* topic of channel */ 0, 1, &irc_protocol_cb_332 },
          { "333", /* infos about topic (nick and date changed) */ 1, 0, &irc_protocol_cb_333 },
          { "335", /* is a bot on */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "338", /* whois (host) */ 1, 0, &irc_protocol_cb_338 },
          { "341", /* inviting */ 1, 0, &irc_protocol_cb_341 },
          { "343", /* is opered as */ 1, 0, &irc_protocol_cb_330_343 },
          { "344", /* channel reop */ 1, 0, &irc_protocol_cb_344 },
          { "345", /* end of channel reop list */ 1, 0, &irc_protocol_cb_345 },
          { "346", /* invite list */ 1, 0, &irc_protocol_cb_346 },
          { "347", /* end of invite list */ 1, 0, &irc_protocol_cb_347 },
          { "348", /* channel exception list */ 1, 0, &irc_protocol_cb_348 },
          { "349", /* end of channel exception list */ 1, 0, &irc_protocol_cb_349 },
          { "351", /* server version */ 1, 0, &irc_protocol_cb_351 },
          { "352", /* who */ 1, 0, &irc_protocol_cb_352 },
          { "353", /* list of nicks on channel */ 1, 0, &irc_protocol_cb_353 },
          { "354", /* whox */ 1, 0, &irc_protocol_cb_354 },
          { "366", /* end of /names list */ 1, 0, &irc_protocol_cb_366 },
          { "367", /* banlist */ 1, 0, &irc_protocol_cb_367 },
          { "368", /* end of banlist */ 1, 0, &irc_protocol_cb_368 },
          { "369", /* whowas (end) */ 1, 0, &irc_protocol_cb_whowas_nick_msg },
          { "378", /* whois (connecting from) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "379", /* whois (using modes) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "401", /* no such nick/channel */ 1, 0, &irc_protocol_cb_generic_error },
          { "402", /* no such server */ 1, 0, &irc_protocol_cb_generic_error },
          { "403", /* no such channel */ 1, 0, &irc_protocol_cb_generic_error },
          { "404", /* cannot send to channel */ 1, 0, &irc_protocol_cb_generic_error },
          { "405", /* too many channels */ 1, 0, &irc_protocol_cb_generic_error },
          { "406", /* was no such nick */ 1, 0, &irc_protocol_cb_generic_error },
          { "407", /* was no such nick */ 1, 0, &irc_protocol_cb_generic_error },
          { "409", /* no origin */ 1, 0, &irc_protocol_cb_generic_error },
          { "410", /* no services */ 1, 0, &irc_protocol_cb_generic_error },
          { "411", /* no recipient */ 1, 0, &irc_protocol_cb_generic_error },
          { "412", /* no text to send */ 1, 0, &irc_protocol_cb_generic_error },
          { "413", /* no toplevel */ 1, 0, &irc_protocol_cb_generic_error },
          { "414", /* wilcard in toplevel domain */ 1, 0, &irc_protocol_cb_generic_error },
          { "421", /* unknown command */ 1, 0, &irc_protocol_cb_generic_error },
          { "422", /* MOTD is missing */ 1, 0, &irc_protocol_cb_generic_error },
          { "423", /* no administrative info */ 1, 0, &irc_protocol_cb_generic_error },
          { "424", /* file error */ 1, 0, &irc_protocol_cb_generic_error },
          { "431", /* no nickname given */ 1, 0, &irc_protocol_cb_generic_error },
          { "432", /* erroneous nickname */ 1, 0, &irc_protocol_cb_432 },
          { "433", /* nickname already in use */ 1, 0, &irc_protocol_cb_433 },
          { "436", /* nickname collision */ 1, 0, &irc_protocol_cb_generic_error },
          { "437", /* nick/channel unavailable */ 1, 0, &irc_protocol_cb_437 },
          { "438", /* not authorized to change nickname */ 1, 0, &irc_protocol_cb_438 },
          { "441", /* user not in channel */ 1, 0, &irc_protocol_cb_generic_error },
          { "442", /* not on channel */ 1, 0, &irc_protocol_cb_generic_error },
          { "443", /* user already on channel */ 1, 0, &irc_protocol_cb_generic_error },
          { "444", /* user not logged in */ 1, 0, &irc_protocol_cb_generic_error },
          { "445", /* summon has been disabled */ 1, 0, &irc_protocol_cb_generic_error },
          { "446", /* users has been disabled */ 1, 0, &irc_protocol_cb_generic_error },
          { "451", /* you are not registered */ 1, 0, &irc_protocol_cb_generic_error },
          { "461", /* not enough parameters */ 1, 0, &irc_protocol_cb_generic_error },
          { "462", /* you may not register */ 1, 0, &irc_protocol_cb_generic_error },
          { "463", /* your host isn't among the privileged */ 1, 0, &irc_protocol_cb_generic_error },
          { "464", /* password incorrect */ 1, 0, &irc_protocol_cb_generic_error },
          { "465", /* you are banned from this server */ 1, 0, &irc_protocol_cb_generic_error },
          { "467", /* channel key already set */ 1, 0, &irc_protocol_cb_generic_error },
          { "470", /* forwarding to another channel */ 1, 0, &irc_protocol_cb_470 },
          { "471", /* channel is already full */ 1, 0, &irc_protocol_cb_generic_error },
          { "472", /* unknown mode char to me */ 1, 0, &irc_protocol_cb_generic_error },
          { "473", /* cannot join channel (invite only) */ 1, 0, &irc_protocol_cb_generic_error },
          { "474", /* cannot join channel (banned from channel) */ 1, 0, &irc_protocol_cb_generic_error },
          { "475", /* cannot join channel (bad channel key) */ 1, 0, &irc_protocol_cb_generic_error },
          { "476", /* bad channel mask */ 1, 0, &irc_protocol_cb_generic_error },
          { "477", /* channel doesn't support modes */ 1, 0, &irc_protocol_cb_generic_error },
          { "481", /* you're not an IRC operator */ 1, 0, &irc_protocol_cb_generic_error },
          { "482", /* you're not channel operator */ 1, 0, &irc_protocol_cb_generic_error },
          { "483", /* you can't kill a server! */ 1, 0, &irc_protocol_cb_generic_error },
          { "484", /* your connection is restricted! */ 1, 0, &irc_protocol_cb_generic_error },
          { "485", /* user is immune from kick/deop */ 1, 0, &irc_protocol_cb_generic_error },
          { "487", /* network split */ 1, 0, &irc_protocol_cb_generic_error },
          { "491", /* no O-lines for your host */ 1, 0, &irc_protocol_cb_generic_error },
          { "501", /* unknown mode flag */ 1, 0, &irc_protocol_cb_generic_error },
          { "502", /* can't change mode for other users */ 1, 0, &irc_protocol_cb_generic_error },
          { "671", /* whois (secure connection) */ 1, 0, &irc_protocol_cb_whois_nick_msg },
          { "728", /* quietlist */ 1, 0, &irc_protocol_cb_728 },
          { "729", /* end of quietlist */ 1, 0, &irc_protocol_cb_729 },
          { "730", /* monitored nicks online */ 1, 0, &irc_protocol_cb_730 },
          { "731", /* monitored nicks offline */ 1, 0, &irc_protocol_cb_731 },
          { "732", /* list of monitored nicks */ 1, 0, &irc_protocol_cb_732 },
          { "733", /* end of monitor list */ 1, 0, &irc_protocol_cb_733 },
          { "734", /* monitor list is full */ 1, 0, &irc_protocol_cb_734 },
          { "900", /* logged in as (SASL) */ 1, 0, &irc_protocol_cb_900 },
          { "901", /* you are now logged in */ 1, 0, &irc_protocol_cb_901 },
          { "902", /* SASL authentication failed (account locked/held) */ 1, 0, &irc_protocol_cb_sasl_end_fail },
          { "903", /* SASL authentication successful */ 1, 0, &irc_protocol_cb_sasl_end_ok },
          { "904", /* SASL authentication failed */ 1, 0, &irc_protocol_cb_sasl_end_fail },
          { "905", /* SASL message too long */ 1, 0, &irc_protocol_cb_sasl_end_fail },
          { "906", /* SASL authentication aborted */ 1, 0, &irc_protocol_cb_sasl_end_fail },
          { "907", /* You have already completed SASL authentication */ 1, 0, &irc_protocol_cb_sasl_end_ok },
          { "936", /* censored word */ 1, 0, &irc_protocol_cb_generic_error },
          { "973", /* whois (secure connection) */ 1, 0, &irc_protocol_cb_server_mode_reason },
          { "974", /* whois (secure connection) */ 1, 0, &irc_protocol_cb_server_mode_reason },
          { "975", /* whois (secure connection) */ 1, 0, &irc_protocol_cb_server_mode_reason },
          { NULL, 0, 0, NULL }
        };

    if (!msg_command)
        return;

    message_colors_decoded = NULL;
    argv = NULL;
    argv_eol = NULL;
    hash_tags = NULL;
    date = 0;

    ptr_msg_after_tags = irc_message;

    /* get tags as hashtable */
    if (irc_message && (irc_message[0] == '@'))
    {
        pos_space = strchr (irc_message, ' ');
        if (pos_space)
        {
            tags = weechat_strndup (irc_message + 1,
                                    pos_space - (irc_message + 1));
            if (tags)
            {
                hash_tags = irc_protocol_get_message_tags (tags);
                if (hash_tags)
                {
                    /* "time" tag (if any) overrides the message date */
                    date = irc_protocol_parse_time (
                        weechat_hashtable_get (hash_tags, "time"));
                }
                free (tags);
            }
            ptr_msg_after_tags = pos_space;
            while (ptr_msg_after_tags[0] == ' ')
            {
                ptr_msg_after_tags++;
            }
        }
        else
            /* tags with no following message: nothing left to parse */
            ptr_msg_after_tags = NULL;
    }

    /* get nick/host/address from IRC message */
    nick1 = NULL;
    address1 = NULL;
    host1 = NULL;
    if (ptr_msg_after_tags && (ptr_msg_after_tags[0] == ':'))
    {
        nick1 = irc_message_get_nick_from_host (ptr_msg_after_tags);
        address1 = irc_message_get_address_from_host (ptr_msg_after_tags);
        host1 = ptr_msg_after_tags + 1;
    }
    nick = (nick1) ? strdup (nick1) : NULL;
    address = (address1) ? strdup (address1) : NULL;
    address_color = (address) ?
        irc_color_decode (
            address,
            weechat_config_boolean (irc_config_network_colors_receive)) :
        NULL;
    host = (host1) ? strdup (host1) : NULL;
    if (host)
    {
        /* truncate host at first space (rest of message) */
        pos_space = strchr (host, ' ');
        if (pos_space)
            pos_space[0] = '\0';
    }
    host_no_color = (host) ? irc_color_decode (host, 0) : NULL;
    host_color = (host) ?
        irc_color_decode (
            host,
            weechat_config_boolean (irc_config_network_colors_receive)) :
        NULL;

    /* check if message is ignored or not */
    ptr_channel = NULL;
    if (msg_channel)
        ptr_channel = irc_channel_search (server, msg_channel);
    message_ignored = irc_ignore_check (
        server,
        (ptr_channel) ? ptr_channel->name : msg_channel,
        nick, host_no_color);

    /* send signal with received command, even if command is ignored */
    irc_server_send_signal (server, "irc_raw_in", msg_command,
                            irc_message, NULL);

    /* send signal with received command, only if message is not ignored */
    if (!message_ignored)
    {
        irc_server_send_signal (server, "irc_in", msg_command,
                                irc_message, NULL);
    }

    /* look for IRC command */
    cmd_found = -1;
    for (i = 0; irc_protocol_messages[i].name; i++)
    {
        if (weechat_strcasecmp (irc_protocol_messages[i].name,
                                msg_command) == 0)
        {
            cmd_found = i;
            break;
        }
    }

    /* command not found */
    if (cmd_found < 0)
    {
        /* for numeric commands, we use default recv function */
        if (irc_protocol_is_numeric_command (msg_command))
        {
            cmd_name = msg_command;
            decode_color = 1;
            keep_trailing_spaces = 0;
            cmd_recv_func = irc_protocol_cb_numeric;
        }
        else
        {
            weechat_printf (server->buffer,
                            _("%s%s: command \"%s\" not found:"),
                            weechat_prefix ("error"), IRC_PLUGIN_NAME,
                            msg_command);
            weechat_printf (server->buffer,
                            "%s%s",
                            weechat_prefix ("error"), irc_message);
            goto end;
        }
    }
    else
    {
        cmd_name = irc_protocol_messages[cmd_found].name;
        decode_color = irc_protocol_messages[cmd_found].decode_color;
        keep_trailing_spaces = irc_protocol_messages[cmd_found].keep_trailing_spaces;
        cmd_recv_func = irc_protocol_messages[cmd_found].recv_function;
    }

    if (cmd_recv_func != NULL)
    {
        if (ptr_msg_after_tags)
        {
            if (decode_color)
            {
                message_colors_decoded = irc_color_decode (
                    ptr_msg_after_tags,
                    weechat_config_boolean (irc_config_network_colors_receive));
            }
            else
            {
                message_colors_decoded = strdup (ptr_msg_after_tags);
            }
        }
        else
            message_colors_decoded = NULL;

        /* split message into words, and into words-to-end-of-line */
        argv = weechat_string_split (message_colors_decoded, " ", NULL,
                                     WEECHAT_STRING_SPLIT_STRIP_LEFT
                                     | WEECHAT_STRING_SPLIT_STRIP_RIGHT
                                     | WEECHAT_STRING_SPLIT_COLLAPSE_SEPS,
                                     0, &argc);
        flags = WEECHAT_STRING_SPLIT_STRIP_LEFT
            | WEECHAT_STRING_SPLIT_COLLAPSE_SEPS
            | WEECHAT_STRING_SPLIT_KEEP_EOL;
        if (keep_trailing_spaces)
            flags |= WEECHAT_STRING_SPLIT_STRIP_RIGHT;
        argv_eol = weechat_string_split (message_colors_decoded, " ", NULL,
                                         flags, 0, NULL);

        return_code = (int) (cmd_recv_func) (server,
                                             date, nick, address_color,
                                             host_color, cmd_name,
                                             message_ignored, argc, argv,
                                             argv_eol);

        if (return_code == WEECHAT_RC_ERROR)
        {
            weechat_printf (server->buffer,
                            _("%s%s: failed to parse command \"%s\" (please "
                              "report to developers):"),
                            weechat_prefix ("error"), IRC_PLUGIN_NAME,
                            msg_command);
            weechat_printf (server->buffer,
                            "%s%s",
                            weechat_prefix ("error"), irc_message);
        }

        /* send signal with received command (if message is not ignored) */
        if (!message_ignored)
        {
            irc_server_send_signal (server, "irc_in2", msg_command,
                                    irc_message, NULL);
        }
    }

    /* send signal with received command, even if command is ignored */
    irc_server_send_signal (server, "irc_raw_in2", msg_command,
                            irc_message, NULL);

end:
    if (nick)
        free (nick);
    if (address)
        free (address);
    if (address_color)
        free (address_color);
    if (host)
        free (host);
    if (host_no_color)
        free (host_no_color);
    if (host_color)
        free (host_color);
    if (message_colors_decoded)
        free (message_colors_decoded);
    if (argv)
        weechat_string_free_split (argv);
    if (argv_eol)
        weechat_string_free_split (argv_eol);
    if (hash_tags)
        weechat_hashtable_free (hash_tags);
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_4696_1 |
/*
* HEVC video Decoder
*
* Copyright (C) 2012 - 2013 Guillaume Martres
* Copyright (C) 2012 - 2013 Mickael Raulet
* Copyright (C) 2012 - 2013 Gildas Cocherel
* Copyright (C) 2012 - 2013 Wassim Hamidouche
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/display.h"
#include "libavutil/internal.h"
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/md5.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/stereo3d.h"
#include "bswapdsp.h"
#include "bytestream.h"
#include "cabac_functions.h"
#include "golomb.h"
#include "hevc.h"
#include "hevc_data.h"
#include "hevc_parse.h"
#include "hevcdec.h"
#include "hwaccel.h"
#include "profiles.h"
/*
 * Maps a block width (only the even values listed below are valid keys)
 * to a compact 0..9 index; unlisted entries are zero-initialized and
 * assumed never to be looked up — TODO confirm against callers.
 */
const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
/**
* NOTE: Each function hls_foo correspond to the function foo in the
* specification (HLS stands for High Level Syntax).
*/
/**
* Section 5.7
*/
/* free everything allocated by pic_arrays_init() */
/*
 * Releases every per-frame array owned by the context; the inverse of
 * pic_arrays_init(). Safe to call on a partially initialized context:
 * av_freep() tolerates NULL and resets each pointer.
 */
static void pic_arrays_free(HEVCContext *s)
{
    /* in-loop filter parameters and boundary-strength maps */
    av_freep(&s->sao);
    av_freep(&s->deblock);
    av_freep(&s->horizontal_bs);
    av_freep(&s->vertical_bs);
    av_freep(&s->filter_slice_edges);

    /* per-block coding metadata */
    av_freep(&s->skip_flag);
    av_freep(&s->tab_ct_depth);
    av_freep(&s->tab_ipm);
    av_freep(&s->cbf_luma);
    av_freep(&s->is_pcm);
    av_freep(&s->qp_y_tab);
    av_freep(&s->tab_slice_address);

    /* slice-header entry points */
    av_freep(&s->sh.entry_point_offset);
    av_freep(&s->sh.size);
    av_freep(&s->sh.offset);

    /* pooled per-frame buffers */
    av_buffer_pool_uninit(&s->tab_mvf_pool);
    av_buffer_pool_uninit(&s->rpl_tab_pool);
}
/* allocate arrays that depend on frame dimensions */
/*
 * Allocates every array that depends on the frame dimensions of the
 * active SPS. On any allocation failure all partial allocations are
 * released via pic_arrays_free() and AVERROR(ENOMEM) is returned.
 */
static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
{
    const int log2_min_cb = sps->log2_min_cb_size;
    const int w           = sps->width;
    const int h           = sps->height;
    /* +1 in each dimension for the padding row/column of min-CBs */
    const int min_cb_count = ((w >> log2_min_cb) + 1) *
                             ((h >> log2_min_cb) + 1);
    const int ctb_count    = sps->ctb_width * sps->ctb_height;
    const int min_pu_count = sps->min_pu_width * sps->min_pu_height;

    /* boundary-strength maps use a 4x4 grid (+1 padding entry) */
    s->bs_width  = (w >> 2) + 1;
    s->bs_height = (h >> 2) + 1;

    s->sao     = av_mallocz_array(ctb_count, sizeof(*s->sao));
    s->deblock = av_mallocz_array(ctb_count, sizeof(*s->deblock));

    s->skip_flag    = av_malloc_array(sps->min_cb_height, sps->min_cb_width);
    s->tab_ct_depth = av_malloc_array(sps->min_cb_height, sps->min_cb_width);

    s->cbf_luma = av_malloc_array(sps->min_tb_width, sps->min_tb_height);
    s->tab_ipm  = av_mallocz(min_pu_count);
    s->is_pcm   = av_malloc_array(sps->min_pu_width + 1, sps->min_pu_height + 1);

    s->filter_slice_edges = av_mallocz(ctb_count);
    s->tab_slice_address  = av_malloc_array(min_cb_count,
                                            sizeof(*s->tab_slice_address));
    s->qp_y_tab           = av_malloc_array(min_cb_count,
                                            sizeof(*s->qp_y_tab));

    s->horizontal_bs = av_mallocz_array(s->bs_width, s->bs_height);
    s->vertical_bs   = av_mallocz_array(s->bs_width, s->bs_height);

    s->tab_mvf_pool = av_buffer_pool_init(min_pu_count * sizeof(MvField),
                                          av_buffer_allocz);
    s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
                                          av_buffer_allocz);

    /* single failure check: av_freep()/uninit in the cleanup path accept
       the NULL pointers left by whichever allocations did not run short */
    if (!s->sao || !s->deblock ||
        !s->skip_flag || !s->tab_ct_depth ||
        !s->cbf_luma || !s->tab_ipm || !s->is_pcm ||
        !s->filter_slice_edges || !s->tab_slice_address || !s->qp_y_tab ||
        !s->horizontal_bs || !s->vertical_bs ||
        !s->tab_mvf_pool || !s->rpl_tab_pool)
        goto fail;

    return 0;

fail:
    pic_arrays_free(s);
    return AVERROR(ENOMEM);
}
/*
 * Parses the pred_weight_table() slice-header syntax (H.265 7.3.6.3):
 * explicit luma/chroma weights and offsets for each reference picture in
 * list L0 and, for B slices, list L1.
 *
 * Returns 0 on success, AVERROR_INVALIDDATA on out-of-range syntax
 * elements.
 */
static int pred_weight_table(HEVCContext *s, GetBitContext *gb)
{
    int i = 0;
    int j = 0;
    uint8_t luma_weight_l0_flag[16];
    uint8_t chroma_weight_l0_flag[16];
    uint8_t luma_weight_l1_flag[16];
    uint8_t chroma_weight_l1_flag[16];
    int luma_log2_weight_denom;

    /*
     * FIX: the flag arrays above hold at most 16 entries (H.265 caps
     * num_ref_idx_l*_active at 15); reject larger counts here so a
     * corrupt slice header cannot overflow the stack buffers below.
     */
    if (s->sh.nb_refs[L0] > 16 || s->sh.nb_refs[L1] > 16)
        return AVERROR_INVALIDDATA;

    luma_log2_weight_denom = get_ue_golomb_long(gb);
    if (luma_log2_weight_denom < 0 || luma_log2_weight_denom > 7) {
        av_log(s->avctx, AV_LOG_ERROR, "luma_log2_weight_denom %d is invalid\n", luma_log2_weight_denom);
        return AVERROR_INVALIDDATA;
    }
    s->sh.luma_log2_weight_denom = av_clip_uintp2(luma_log2_weight_denom, 3);
    if (s->ps.sps->chroma_format_idc != 0) {
        /* delta is signed; accumulate in 64 bits before range check */
        int64_t chroma_log2_weight_denom = luma_log2_weight_denom + (int64_t)get_se_golomb(gb);
        if (chroma_log2_weight_denom < 0 || chroma_log2_weight_denom > 7) {
            av_log(s->avctx, AV_LOG_ERROR, "chroma_log2_weight_denom %"PRId64" is invalid\n", chroma_log2_weight_denom);
            return AVERROR_INVALIDDATA;
        }
        s->sh.chroma_log2_weight_denom = chroma_log2_weight_denom;
    }

    /* L0: explicit-weight flags, then default weights where absent */
    for (i = 0; i < s->sh.nb_refs[L0]; i++) {
        luma_weight_l0_flag[i] = get_bits1(gb);
        if (!luma_weight_l0_flag[i]) {
            s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
            s->sh.luma_offset_l0[i] = 0;
        }
    }
    if (s->ps.sps->chroma_format_idc != 0) {
        for (i = 0; i < s->sh.nb_refs[L0]; i++)
            chroma_weight_l0_flag[i] = get_bits1(gb);
    } else {
        for (i = 0; i < s->sh.nb_refs[L0]; i++)
            chroma_weight_l0_flag[i] = 0;
    }
    for (i = 0; i < s->sh.nb_refs[L0]; i++) {
        if (luma_weight_l0_flag[i]) {
            int delta_luma_weight_l0 = get_se_golomb(gb);
            s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
            s->sh.luma_offset_l0[i] = get_se_golomb(gb);
        }
        if (chroma_weight_l0_flag[i]) {
            for (j = 0; j < 2; j++) {
                int delta_chroma_weight_l0 = get_se_golomb(gb);
                int delta_chroma_offset_l0 = get_se_golomb(gb);

                if (   (int8_t)delta_chroma_weight_l0 != delta_chroma_weight_l0
                    || delta_chroma_offset_l0 < -(1<<17) || delta_chroma_offset_l0 > (1<<17)) {
                    return AVERROR_INVALIDDATA;
                }

                s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
                s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
                                                                                 >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
            }
        } else {
            s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
            s->sh.chroma_offset_l0[i][0] = 0;
            s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
            s->sh.chroma_offset_l0[i][1] = 0;
        }
    }

    /* L1 is present only for B slices; same layout as L0 */
    if (s->sh.slice_type == HEVC_SLICE_B) {
        for (i = 0; i < s->sh.nb_refs[L1]; i++) {
            luma_weight_l1_flag[i] = get_bits1(gb);
            if (!luma_weight_l1_flag[i]) {
                s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
                s->sh.luma_offset_l1[i] = 0;
            }
        }
        if (s->ps.sps->chroma_format_idc != 0) {
            for (i = 0; i < s->sh.nb_refs[L1]; i++)
                chroma_weight_l1_flag[i] = get_bits1(gb);
        } else {
            for (i = 0; i < s->sh.nb_refs[L1]; i++)
                chroma_weight_l1_flag[i] = 0;
        }
        for (i = 0; i < s->sh.nb_refs[L1]; i++) {
            if (luma_weight_l1_flag[i]) {
                int delta_luma_weight_l1 = get_se_golomb(gb);
                s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
                s->sh.luma_offset_l1[i] = get_se_golomb(gb);
            }
            if (chroma_weight_l1_flag[i]) {
                for (j = 0; j < 2; j++) {
                    int delta_chroma_weight_l1 = get_se_golomb(gb);
                    int delta_chroma_offset_l1 = get_se_golomb(gb);

                    if (   (int8_t)delta_chroma_weight_l1 != delta_chroma_weight_l1
                        || delta_chroma_offset_l1 < -(1<<17) || delta_chroma_offset_l1 > (1<<17)) {
                        return AVERROR_INVALIDDATA;
                    }

                    s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
                    s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
                                                                                     >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
                }
            } else {
                s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
                s->sh.chroma_offset_l1[i][0] = 0;
                s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
                s->sh.chroma_offset_l1[i][1] = 0;
            }
        }
    }

    return 0;
}
/*
 * Parses the long-term reference picture set from the slice header:
 * first nb_sps entries selected from the SPS-signalled candidates, then
 * nb_sh entries coded explicitly in the slice header.
 *
 * Fills rps->nb_refs / rps->poc[] / rps->used[]; the syntax-element read
 * order is mandated by the bitstream, so statements must not be
 * reordered.
 *
 * Returns 0 on success, AVERROR_INVALIDDATA on out-of-range counts or a
 * POC that does not fit in 32 bits.
 */
static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
{
    const HEVCSPS *sps = s->ps.sps;
    int max_poc_lsb    = 1 << sps->log2_max_poc_lsb;
    int prev_delta_msb = 0;
    unsigned int nb_sps = 0, nb_sh;
    int i;

    rps->nb_refs = 0;
    if (!sps->long_term_ref_pics_present_flag)
        return 0;

    if (sps->num_long_term_ref_pics_sps > 0)
        nb_sps = get_ue_golomb_long(gb);
    nb_sh = get_ue_golomb_long(gb);

    if (nb_sps > sps->num_long_term_ref_pics_sps)
        return AVERROR_INVALIDDATA;
    /* 64-bit sum guards against unsigned wrap of nb_sh + nb_sps */
    if (nb_sh + (uint64_t)nb_sps > FF_ARRAY_ELEMS(rps->poc))
        return AVERROR_INVALIDDATA;

    rps->nb_refs = nb_sh + nb_sps;

    for (i = 0; i < rps->nb_refs; i++) {
        uint8_t delta_poc_msb_present;

        if (i < nb_sps) {
            /* entry referenced from the SPS candidate list */
            uint8_t lt_idx_sps = 0;

            if (sps->num_long_term_ref_pics_sps > 1)
                lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));

            rps->poc[i]  = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
            rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
        } else {
            /* entry coded explicitly in the slice header */
            rps->poc[i]  = get_bits(gb, sps->log2_max_poc_lsb);
            rps->used[i] = get_bits1(gb);
        }

        delta_poc_msb_present = get_bits1(gb);
        if (delta_poc_msb_present) {
            int64_t delta = get_ue_golomb_long(gb);
            int64_t poc;

            /* deltas are cumulative within each of the two groups */
            if (i && i != nb_sps)
                delta += prev_delta_msb;

            poc = rps->poc[i] + s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
            /* reject POCs outside the representable 32-bit range */
            if (poc != (int32_t)poc)
                return AVERROR_INVALIDDATA;
            rps->poc[i] = poc;
            prev_delta_msb = delta;
        }
    }

    return 0;
}
/*
 * Exports stream-level parameters from the active SPS (and, when present,
 * its VPS) into the AVCodecContext: coded/display dimensions, reorder
 * depth, profile/level, SAR, color properties and frame rate.
 *
 * FIX (CWE-476): the SPS can reference a vps_id whose VPS was never
 * received, leaving ps->vps_list[sps->vps_id] NULL; the original code
 * dereferenced it unconditionally and crashed on such (fuzzed/truncated)
 * streams. The VPS is now optional and only used for timing info.
 */
static void export_stream_params(AVCodecContext *avctx, const HEVCParamSets *ps,
                                 const HEVCSPS *sps)
{
    const HEVCVPS *vps = NULL;
    const HEVCWindow *ow = &sps->output_window;
    unsigned int num = 0, den = 0;

    if (ps->vps_list[sps->vps_id])
        vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;

    avctx->pix_fmt     = sps->pix_fmt;
    avctx->coded_width  = sps->width;
    avctx->coded_height = sps->height;
    /* displayed size: coded size minus the conformance cropping window */
    avctx->width        = sps->width  - ow->left_offset - ow->right_offset;
    avctx->height       = sps->height - ow->top_offset  - ow->bottom_offset;
    avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
    avctx->profile      = sps->ptl.general_ptl.profile_idc;
    avctx->level        = sps->ptl.general_ptl.level_idc;

    ff_set_sar(avctx, sps->vui.sar);

    if (sps->vui.video_signal_type_present_flag)
        avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
                                                            : AVCOL_RANGE_MPEG;
    else
        avctx->color_range = AVCOL_RANGE_MPEG;

    if (sps->vui.colour_description_present_flag) {
        avctx->color_primaries = sps->vui.colour_primaries;
        avctx->color_trc       = sps->vui.transfer_characteristic;
        avctx->colorspace      = sps->vui.matrix_coeffs;
    } else {
        avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
        avctx->color_trc       = AVCOL_TRC_UNSPECIFIED;
        avctx->colorspace      = AVCOL_SPC_UNSPECIFIED;
    }

    /* timing: VPS takes precedence over the SPS VUI when both exist */
    if (vps && vps->vps_timing_info_present_flag) {
        num = vps->vps_num_units_in_tick;
        den = vps->vps_time_scale;
    } else if (sps->vui.vui_timing_info_present_flag) {
        num = sps->vui.vui_num_units_in_tick;
        den = sps->vui.vui_time_scale;
    }

    if (num != 0 && den != 0)
        av_reduce(&avctx->framerate.den, &avctx->framerate.num,
                  num, den, 1 << 30);
}
/*
 * Builds the list of candidate pixel formats for this stream: every
 * compiled-in hardware-acceleration format that supports the SPS pixel
 * format, followed by the software format itself, then asks the caller
 * (via ff_thread_get_format) to pick one.
 *
 * Returns the negotiated pixel format, or a negative value on error.
 */
static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
{
    /* worst-case number of hwaccel entries (D3D11VA contributes two) */
#define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
                     CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
                     CONFIG_HEVC_NVDEC_HWACCEL + \
                     CONFIG_HEVC_VAAPI_HWACCEL + \
                     CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
                     CONFIG_HEVC_VDPAU_HWACCEL)
    /* +2: software format and the AV_PIX_FMT_NONE terminator */
    enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;

    switch (sps->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUVJ420P:
#if CONFIG_HEVC_DXVA2_HWACCEL
        *fmt++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_HEVC_D3D11VA_HWACCEL
        *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
        *fmt++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_HEVC_VAAPI_HWACCEL
        *fmt++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_HEVC_VDPAU_HWACCEL
        *fmt++ = AV_PIX_FMT_VDPAU;
#endif
#if CONFIG_HEVC_NVDEC_HWACCEL
        *fmt++ = AV_PIX_FMT_CUDA;
#endif
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
        *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
        break;
    case AV_PIX_FMT_YUV420P10:
#if CONFIG_HEVC_DXVA2_HWACCEL
        *fmt++ = AV_PIX_FMT_DXVA2_VLD;
#endif
#if CONFIG_HEVC_D3D11VA_HWACCEL
        *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
        *fmt++ = AV_PIX_FMT_D3D11;
#endif
#if CONFIG_HEVC_VAAPI_HWACCEL
        *fmt++ = AV_PIX_FMT_VAAPI;
#endif
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
        *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
#endif
#if CONFIG_HEVC_NVDEC_HWACCEL
        *fmt++ = AV_PIX_FMT_CUDA;
#endif
        break;
    case AV_PIX_FMT_YUV420P12:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV444P12:
#if CONFIG_HEVC_NVDEC_HWACCEL
        *fmt++ = AV_PIX_FMT_CUDA;
#endif
        break;
    }

    /* software fallback + list terminator */
    *fmt++ = sps->pix_fmt;
    *fmt = AV_PIX_FMT_NONE;

    return ff_thread_get_format(s->avctx, pix_fmts);
}
/*
 * Activates a new SPS: frees the previous per-frame arrays, allocates new
 * ones, exports stream parameters to the AVCodecContext and (re)builds
 * the SAO line buffers. Passing sps == NULL just deactivates the current
 * parameter sets.
 *
 * Returns 0 on success, a negative AVERROR code on failure (with the
 * context left deactivated).
 *
 * FIX (CWE-476): the original code dereferenced
 * s->ps.vps_list[sps->vps_id]->data without checking the entry, crashing
 * on streams whose VPS never arrived; it also left the two SAO buffer
 * allocations unchecked, deferring the NULL dereference to the SAO
 * filter on OOM.
 */
static int set_sps(HEVCContext *s, const HEVCSPS *sps,
                   enum AVPixelFormat pix_fmt)
{
    int ret, i;

    pic_arrays_free(s);
    s->ps.sps = NULL;
    s->ps.vps = NULL;

    if (!sps)
        return 0;

    /* refuse to activate an SPS whose VPS is missing: the dereference at
       the end of this function would otherwise hit a NULL pointer */
    if (!s->ps.vps_list[sps->vps_id])
        return AVERROR_INVALIDDATA;

    ret = pic_arrays_init(s, sps);
    if (ret < 0)
        goto fail;

    export_stream_params(s->avctx, &s->ps, sps);

    s->avctx->pix_fmt = pix_fmt;

    ff_hevc_pred_init(&s->hpc,     sps->bit_depth);
    ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
    ff_videodsp_init (&s->vdsp,    sps->bit_depth);

    for (i = 0; i < 3; i++) {
        av_freep(&s->sao_pixel_buffer_h[i]);
        av_freep(&s->sao_pixel_buffer_v[i]);
    }

    if (sps->sao_enabled && !s->avctx->hwaccel) {
        int c_count = (sps->chroma_format_idc != 0) ? 3 : 1;
        int c_idx;

        for(c_idx = 0; c_idx < c_count; c_idx++) {
            int w = sps->width >> sps->hshift[c_idx];
            int h = sps->height >> sps->vshift[c_idx];
            s->sao_pixel_buffer_h[c_idx] =
                av_malloc((w * 2 * sps->ctb_height) <<
                          sps->pixel_shift);
            s->sao_pixel_buffer_v[c_idx] =
                av_malloc((h * 2 * sps->ctb_width) <<
                          sps->pixel_shift);
            /* fail now instead of dereferencing NULL in the SAO filter */
            if (!s->sao_pixel_buffer_h[c_idx] ||
                !s->sao_pixel_buffer_v[c_idx]) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    s->ps.sps = sps;
    s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;

    return 0;

fail:
    pic_arrays_free(s);
    s->ps.sps = NULL;
    return ret;
}
static int hls_slice_header(HEVCContext *s)
{
GetBitContext *gb = &s->HEVClc->gb;
SliceHeader *sh = &s->sh;
int i, ret;
// Coded parameters
sh->first_slice_in_pic_flag = get_bits1(gb);
if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
s->seq_decode = (s->seq_decode + 1) & 0xff;
s->max_ra = INT_MAX;
if (IS_IDR(s))
ff_hevc_clear_refs(s);
}
sh->no_output_of_prior_pics_flag = 0;
if (IS_IRAP(s))
sh->no_output_of_prior_pics_flag = get_bits1(gb);
sh->pps_id = get_ue_golomb_long(gb);
if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
return AVERROR_INVALIDDATA;
}
if (!sh->first_slice_in_pic_flag &&
s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
return AVERROR_INVALIDDATA;
}
s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
if (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos == 1)
sh->no_output_of_prior_pics_flag = 1;
if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
const HEVCSPS *last_sps = s->ps.sps;
enum AVPixelFormat pix_fmt;
if (last_sps && IS_IRAP(s) && s->nal_unit_type != HEVC_NAL_CRA_NUT) {
if (sps->width != last_sps->width || sps->height != last_sps->height ||
sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering !=
last_sps->temporal_layer[last_sps->max_sub_layers - 1].max_dec_pic_buffering)
sh->no_output_of_prior_pics_flag = 0;
}
ff_hevc_clear_refs(s);
ret = set_sps(s, sps, sps->pix_fmt);
if (ret < 0)
return ret;
pix_fmt = get_format(s, sps);
if (pix_fmt < 0)
return pix_fmt;
s->avctx->pix_fmt = pix_fmt;
s->seq_decode = (s->seq_decode + 1) & 0xff;
s->max_ra = INT_MAX;
}
sh->dependent_slice_segment_flag = 0;
if (!sh->first_slice_in_pic_flag) {
int slice_address_length;
if (s->ps.pps->dependent_slice_segments_enabled_flag)
sh->dependent_slice_segment_flag = get_bits1(gb);
slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
s->ps.sps->ctb_height);
sh->slice_segment_addr = get_bitsz(gb, slice_address_length);
if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid slice segment address: %u.\n",
sh->slice_segment_addr);
return AVERROR_INVALIDDATA;
}
if (!sh->dependent_slice_segment_flag) {
sh->slice_addr = sh->slice_segment_addr;
s->slice_idx++;
}
} else {
sh->slice_segment_addr = sh->slice_addr = 0;
s->slice_idx = 0;
s->slice_initialized = 0;
}
if (!sh->dependent_slice_segment_flag) {
s->slice_initialized = 0;
for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
sh->slice_type = get_ue_golomb_long(gb);
if (!(sh->slice_type == HEVC_SLICE_I ||
sh->slice_type == HEVC_SLICE_P ||
sh->slice_type == HEVC_SLICE_B)) {
av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
sh->slice_type);
return AVERROR_INVALIDDATA;
}
if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) {
av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
return AVERROR_INVALIDDATA;
}
// when flag is not present, picture is inferred to be output
sh->pic_output_flag = 1;
if (s->ps.pps->output_flag_present_flag)
sh->pic_output_flag = get_bits1(gb);
if (s->ps.sps->separate_colour_plane_flag)
sh->colour_plane_id = get_bits(gb, 2);
if (!IS_IDR(s)) {
int poc, pos;
sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
poc = ff_hevc_compute_poc(s->ps.sps, s->pocTid0, sh->pic_order_cnt_lsb, s->nal_unit_type);
if (!sh->first_slice_in_pic_flag && poc != s->poc) {
av_log(s->avctx, AV_LOG_WARNING,
"Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
if (s->avctx->err_recognition & AV_EF_EXPLODE)
return AVERROR_INVALIDDATA;
poc = s->poc;
}
s->poc = poc;
sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
pos = get_bits_left(gb);
if (!sh->short_term_ref_pic_set_sps_flag) {
ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
if (ret < 0)
return ret;
sh->short_term_rps = &sh->slice_rps;
} else {
int numbits, rps_idx;
if (!s->ps.sps->nb_st_rps) {
av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
return AVERROR_INVALIDDATA;
}
numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
}
sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
pos = get_bits_left(gb);
ret = decode_lt_rps(s, &sh->long_term_rps, gb);
if (ret < 0) {
av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
if (s->avctx->err_recognition & AV_EF_EXPLODE)
return AVERROR_INVALIDDATA;
}
sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
if (s->ps.sps->sps_temporal_mvp_enabled_flag)
sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
else
sh->slice_temporal_mvp_enabled_flag = 0;
} else {
s->sh.short_term_rps = NULL;
s->poc = 0;
}
/* 8.3.1 */
if (sh->first_slice_in_pic_flag && s->temporal_id == 0 &&
s->nal_unit_type != HEVC_NAL_TRAIL_N &&
s->nal_unit_type != HEVC_NAL_TSA_N &&
s->nal_unit_type != HEVC_NAL_STSA_N &&
s->nal_unit_type != HEVC_NAL_RADL_N &&
s->nal_unit_type != HEVC_NAL_RADL_R &&
s->nal_unit_type != HEVC_NAL_RASL_N &&
s->nal_unit_type != HEVC_NAL_RASL_R)
s->pocTid0 = s->poc;
if (s->ps.sps->sao_enabled) {
sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
if (s->ps.sps->chroma_format_idc) {
sh->slice_sample_adaptive_offset_flag[1] =
sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
}
} else {
sh->slice_sample_adaptive_offset_flag[0] = 0;
sh->slice_sample_adaptive_offset_flag[1] = 0;
sh->slice_sample_adaptive_offset_flag[2] = 0;
}
sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
int nb_refs;
sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
if (sh->slice_type == HEVC_SLICE_B)
sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
if (get_bits1(gb)) { // num_ref_idx_active_override_flag
sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
if (sh->slice_type == HEVC_SLICE_B)
sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
}
if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
sh->nb_refs[L0], sh->nb_refs[L1]);
return AVERROR_INVALIDDATA;
}
sh->rpl_modification_flag[0] = 0;
sh->rpl_modification_flag[1] = 0;
nb_refs = ff_hevc_frame_nb_refs(s);
if (!nb_refs) {
av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
return AVERROR_INVALIDDATA;
}
if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
sh->rpl_modification_flag[0] = get_bits1(gb);
if (sh->rpl_modification_flag[0]) {
for (i = 0; i < sh->nb_refs[L0]; i++)
sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
}
if (sh->slice_type == HEVC_SLICE_B) {
sh->rpl_modification_flag[1] = get_bits1(gb);
if (sh->rpl_modification_flag[1] == 1)
for (i = 0; i < sh->nb_refs[L1]; i++)
sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
}
}
if (sh->slice_type == HEVC_SLICE_B)
sh->mvd_l1_zero_flag = get_bits1(gb);
if (s->ps.pps->cabac_init_present_flag)
sh->cabac_init_flag = get_bits1(gb);
else
sh->cabac_init_flag = 0;
sh->collocated_ref_idx = 0;
if (sh->slice_temporal_mvp_enabled_flag) {
sh->collocated_list = L0;
if (sh->slice_type == HEVC_SLICE_B)
sh->collocated_list = !get_bits1(gb);
if (sh->nb_refs[sh->collocated_list] > 1) {
sh->collocated_ref_idx = get_ue_golomb_long(gb);
if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid collocated_ref_idx: %d.\n",
sh->collocated_ref_idx);
return AVERROR_INVALIDDATA;
}
}
}
if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
(s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
int ret = pred_weight_table(s, gb);
if (ret < 0)
return ret;
}
sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid number of merging MVP candidates: %d.\n",
sh->max_num_merge_cand);
return AVERROR_INVALIDDATA;
}
}
sh->slice_qp_delta = get_se_golomb(gb);
if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
sh->slice_cb_qp_offset = get_se_golomb(gb);
sh->slice_cr_qp_offset = get_se_golomb(gb);
} else {
sh->slice_cb_qp_offset = 0;
sh->slice_cr_qp_offset = 0;
}
if (s->ps.pps->chroma_qp_offset_list_enabled_flag)
sh->cu_chroma_qp_offset_enabled_flag = get_bits1(gb);
else
sh->cu_chroma_qp_offset_enabled_flag = 0;
if (s->ps.pps->deblocking_filter_control_present_flag) {
int deblocking_filter_override_flag = 0;
if (s->ps.pps->deblocking_filter_override_enabled_flag)
deblocking_filter_override_flag = get_bits1(gb);
if (deblocking_filter_override_flag) {
sh->disable_deblocking_filter_flag = get_bits1(gb);
if (!sh->disable_deblocking_filter_flag) {
int beta_offset_div2 = get_se_golomb(gb);
int tc_offset_div2 = get_se_golomb(gb) ;
if (beta_offset_div2 < -6 || beta_offset_div2 > 6 ||
tc_offset_div2 < -6 || tc_offset_div2 > 6) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid deblock filter offsets: %d, %d\n",
beta_offset_div2, tc_offset_div2);
return AVERROR_INVALIDDATA;
}
sh->beta_offset = beta_offset_div2 * 2;
sh->tc_offset = tc_offset_div2 * 2;
}
} else {
sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
sh->beta_offset = s->ps.pps->beta_offset;
sh->tc_offset = s->ps.pps->tc_offset;
}
} else {
sh->disable_deblocking_filter_flag = 0;
sh->beta_offset = 0;
sh->tc_offset = 0;
}
if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
(sh->slice_sample_adaptive_offset_flag[0] ||
sh->slice_sample_adaptive_offset_flag[1] ||
!sh->disable_deblocking_filter_flag)) {
sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
} else {
sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
}
} else if (!s->slice_initialized) {
av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
return AVERROR_INVALIDDATA;
}
sh->num_entry_point_offsets = 0;
if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
unsigned num_entry_point_offsets = get_ue_golomb_long(gb);
// It would be possible to bound this tighter but this here is simpler
if (num_entry_point_offsets > get_bits_left(gb)) {
av_log(s->avctx, AV_LOG_ERROR, "num_entry_point_offsets %d is invalid\n", num_entry_point_offsets);
return AVERROR_INVALIDDATA;
}
sh->num_entry_point_offsets = num_entry_point_offsets;
if (sh->num_entry_point_offsets > 0) {
int offset_len = get_ue_golomb_long(gb) + 1;
if (offset_len < 1 || offset_len > 32) {
sh->num_entry_point_offsets = 0;
av_log(s->avctx, AV_LOG_ERROR, "offset_len %d is invalid\n", offset_len);
return AVERROR_INVALIDDATA;
}
av_freep(&sh->entry_point_offset);
av_freep(&sh->offset);
av_freep(&sh->size);
sh->entry_point_offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(unsigned));
sh->offset = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
sh->size = av_malloc_array(sh->num_entry_point_offsets, sizeof(int));
if (!sh->entry_point_offset || !sh->offset || !sh->size) {
sh->num_entry_point_offsets = 0;
av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate memory\n");
return AVERROR(ENOMEM);
}
for (i = 0; i < sh->num_entry_point_offsets; i++) {
unsigned val = get_bits_long(gb, offset_len);
sh->entry_point_offset[i] = val + 1; // +1; // +1 to get the size
}
if (s->threads_number > 1 && (s->ps.pps->num_tile_rows > 1 || s->ps.pps->num_tile_columns > 1)) {
s->enable_parallel_tiles = 0; // TODO: you can enable tiles in parallel here
s->threads_number = 1;
} else
s->enable_parallel_tiles = 0;
} else
s->enable_parallel_tiles = 0;
}
if (s->ps.pps->slice_header_extension_present_flag) {
unsigned int length = get_ue_golomb_long(gb);
if (length*8LL > get_bits_left(gb)) {
av_log(s->avctx, AV_LOG_ERROR, "too many slice_header_extension_data_bytes\n");
return AVERROR_INVALIDDATA;
}
for (i = 0; i < length; i++)
skip_bits(gb, 8); // slice_header_extension_data_byte
}
// Inferred parameters
sh->slice_qp = 26U + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
if (sh->slice_qp > 51 ||
sh->slice_qp < -s->ps.sps->qp_bd_offset) {
av_log(s->avctx, AV_LOG_ERROR,
"The slice_qp %d is outside the valid range "
"[%d, 51].\n",
sh->slice_qp,
-s->ps.sps->qp_bd_offset);
return AVERROR_INVALIDDATA;
}
sh->slice_ctb_addr_rs = sh->slice_segment_addr;
if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
return AVERROR_INVALIDDATA;
}
if (get_bits_left(gb) < 0) {
av_log(s->avctx, AV_LOG_ERROR,
"Overread slice header by %d bits\n", -get_bits_left(gb));
return AVERROR_INVALIDDATA;
}
s->HEVClc->first_qp_group = !s->sh.dependent_slice_segment_flag;
if (!s->ps.pps->cu_qp_delta_enabled_flag)
s->HEVClc->qp_y = s->sh.slice_qp;
s->slice_initialized = 1;
s->HEVClc->tu.cu_qp_offset_cb = 0;
s->HEVClc->tu.cu_qp_offset_cr = 0;
return 0;
}
/* Address a per-CTB table entry at CTB raster position (x, y). */
#define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])

/* Set one SAO parameter of the current CTB: decode it fresh (value),
 * inherit it from the left/above neighbour when the corresponding merge
 * flag was decoded, or zero it.  Relies on sao_merge_left_flag,
 * sao_merge_up_flag, rx and ry from the enclosing scope. */
#define SET_SAO(elem, value)                            \
do {                                                    \
    if (!sao_merge_up_flag && !sao_merge_left_flag)     \
        sao->elem = value;                              \
    else if (sao_merge_left_flag)                       \
        sao->elem = CTB(s->sao, rx-1, ry).elem;         \
    else if (sao_merge_up_flag)                         \
        sao->elem = CTB(s->sao, rx, ry-1).elem;         \
    else                                                \
        sao->elem = 0;                                  \
} while (0)

/* Decode the sample-adaptive-offset (SAO) parameters for the CTB at CTB
 * coordinates (rx, ry) and store them in s->sao. */
static void hls_sao_param(HEVCContext *s, int rx, int ry)
{
    HEVCLocalContext *lc    = s->HEVClc;
    int sao_merge_left_flag = 0;
    int sao_merge_up_flag   = 0;
    SAOParams *sao          = &CTB(s->sao, rx, ry);
    int c_idx, i;

    if (s->sh.slice_sample_adaptive_offset_flag[0] ||
        s->sh.slice_sample_adaptive_offset_flag[1]) {
        /* sao_merge_left_flag: only parsed when a left neighbour exists
         * and is available (same slice/tile). */
        if (rx > 0) {
            if (lc->ctb_left_flag)
                sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
        }
        /* sao_merge_up_flag: only parsed when not already merging left. */
        if (ry > 0 && !sao_merge_left_flag) {
            if (lc->ctb_up_flag)
                sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
        }
    }

    /* Luma only for monochrome, all three components otherwise. */
    for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
        int log2_sao_offset_scale = c_idx == 0 ? s->ps.pps->log2_sao_offset_scale_luma :
                                                 s->ps.pps->log2_sao_offset_scale_chroma;

        if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
            sao->type_idx[c_idx] = SAO_NOT_APPLIED;
            continue;
        }

        /* Cr (c_idx == 2) shares its SAO type and EO class with Cb. */
        if (c_idx == 2) {
            sao->type_idx[2] = sao->type_idx[1];
            sao->eo_class[2] = sao->eo_class[1];
        } else {
            SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
        }

        if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
            continue;

        for (i = 0; i < 4; i++)
            SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));

        if (sao->type_idx[c_idx] == SAO_BAND) {
            /* Band offsets carry an explicit sign (only when the magnitude
             * is non-zero) followed by the starting band position. */
            for (i = 0; i < 4; i++) {
                if (sao->offset_abs[c_idx][i]) {
                    SET_SAO(offset_sign[c_idx][i],
                            ff_hevc_sao_offset_sign_decode(s));
                } else {
                    sao->offset_sign[c_idx][i] = 0;
                }
            }
            SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
        } else if (c_idx != 2) {
            /* Edge offsets: signs are implicit, only the EO class is coded. */
            SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
        }

        // Inferred parameters
        sao->offset_val[c_idx][0] = 0;
        for (i = 0; i < 4; i++) {
            sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i];
            if (sao->type_idx[c_idx] == SAO_EDGE) {
                /* Edge type: the last two of the four offsets are negated. */
                if (i > 1)
                    sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
            } else if (sao->offset_sign[c_idx][i]) {
                sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
            }
            sao->offset_val[c_idx][i + 1] *= 1 << log2_sao_offset_scale;
        }
    }
}

#undef SET_SAO
#undef CTB
/* Parse the cross-component prediction residual scale for chroma
 * component idx and store the signed scale in lc->tu.res_scale_val.
 * Always returns 0. */
static int hls_cross_component_pred(HEVCContext *s, int idx) {
    HEVCLocalContext *lc = s->HEVClc;
    const int scale_abs_plus1 = ff_hevc_log2_res_scale_abs(s, idx);
    int scale_val = 0;

    if (scale_abs_plus1) {
        /* The sign flag is only present in the bitstream when the
         * magnitude is non-zero. */
        const int sign = ff_hevc_res_scale_sign_flag(s, idx);
        scale_val = sign ? -(1 << (scale_abs_plus1 - 1))
                         :  (1 << (scale_abs_plus1 - 1));
    }
    lc->tu.res_scale_val = scale_val;
    return 0;
}
/*
 * Decode one transform unit: parse the per-TU QP delta and chroma QP
 * offsets, perform intra prediction where needed, and decode the luma
 * and chroma residuals.
 *
 * (x0, y0)             position of the current transform block
 * (xBase, yBase)       position of the parent block; used for chroma when
 *                      four 4x4 luma blocks share a single chroma block
 * (cb_xBase, cb_yBase) position of the coding block, for QP derivation
 * cbf_luma/cbf_cb/cbf_cr  coded-block flags; the chroma arrays have two
 *                      entries to cover the second chroma block in 4:2:2
 *
 * Returns 0 on success, a negative AVERROR code on invalid data.
 */
static int hls_transform_unit(HEVCContext *s, int x0, int y0,
                              int xBase, int yBase, int cb_xBase, int cb_yBase,
                              int log2_cb_size, int log2_trafo_size,
                              int blk_idx, int cbf_luma, int *cbf_cb, int *cbf_cr)
{
    HEVCLocalContext *lc = s->HEVClc;
    const int log2_trafo_size_c = log2_trafo_size - s->ps.sps->hshift[1];
    int i;

    if (lc->cu.pred_mode == MODE_INTRA) {
        int trafo_size = 1 << log2_trafo_size;
        ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size);
        s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0);
    }

    if (cbf_luma || cbf_cb[0] || cbf_cr[0] ||
        (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
        int scan_idx   = SCAN_DIAG;
        int scan_idx_c = SCAN_DIAG;
        int cbf_chroma = cbf_cb[0] || cbf_cr[0] ||
                         (s->ps.sps->chroma_format_idc == 2 &&
                          (cbf_cb[1] || cbf_cr[1]));

        /* cu_qp_delta is coded at most once per quantization group. */
        if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
            lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s);
            if (lc->tu.cu_qp_delta != 0)
                if (ff_hevc_cu_qp_delta_sign_flag(s) == 1)
                    lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
            lc->tu.is_cu_qp_delta_coded = 1;

            if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
                lc->tu.cu_qp_delta >  (25 + s->ps.sps->qp_bd_offset / 2)) {
                av_log(s->avctx, AV_LOG_ERROR,
                       "The cu_qp_delta %d is outside the valid range "
                       "[%d, %d].\n",
                       lc->tu.cu_qp_delta,
                       -(26 + s->ps.sps->qp_bd_offset / 2),
                        (25 + s->ps.sps->qp_bd_offset / 2));
                return AVERROR_INVALIDDATA;
            }

            ff_hevc_set_qPy(s, cb_xBase, cb_yBase, log2_cb_size);
        }

        /* Per-CU chroma QP offsets (range extensions), coded once per CU
         * and only outside transquant bypass. */
        if (s->sh.cu_chroma_qp_offset_enabled_flag && cbf_chroma &&
            !lc->cu.cu_transquant_bypass_flag && !lc->tu.is_cu_chroma_qp_offset_coded) {
            int cu_chroma_qp_offset_flag = ff_hevc_cu_chroma_qp_offset_flag(s);
            if (cu_chroma_qp_offset_flag) {
                int cu_chroma_qp_offset_idx = 0;
                if (s->ps.pps->chroma_qp_offset_list_len_minus1 > 0) {
                    cu_chroma_qp_offset_idx = ff_hevc_cu_chroma_qp_offset_idx(s);
                    av_log(s->avctx, AV_LOG_ERROR,
                           "cu_chroma_qp_offset_idx not yet tested.\n");
                }
                lc->tu.cu_qp_offset_cb = s->ps.pps->cb_qp_offset_list[cu_chroma_qp_offset_idx];
                lc->tu.cu_qp_offset_cr = s->ps.pps->cr_qp_offset_list[cu_chroma_qp_offset_idx];
            } else {
                lc->tu.cu_qp_offset_cb = 0;
                lc->tu.cu_qp_offset_cr = 0;
            }
            lc->tu.is_cu_chroma_qp_offset_coded = 1;
        }

        /* For small intra TUs the coefficient scan order depends on the
         * intra prediction direction: near-horizontal modes (6..14) use a
         * vertical scan, near-vertical modes (22..30) a horizontal one. */
        if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
            if (lc->tu.intra_pred_mode >= 6 &&
                lc->tu.intra_pred_mode <= 14) {
                scan_idx = SCAN_VERT;
            } else if (lc->tu.intra_pred_mode >= 22 &&
                       lc->tu.intra_pred_mode <= 30) {
                scan_idx = SCAN_HORIZ;
            }

            if (lc->tu.intra_pred_mode_c >= 6 &&
                lc->tu.intra_pred_mode_c <= 14) {
                scan_idx_c = SCAN_VERT;
            } else if (lc->tu.intra_pred_mode_c >= 22 &&
                       lc->tu.intra_pred_mode_c <= 30) {
                scan_idx_c = SCAN_HORIZ;
            }
        }

        lc->tu.cross_pf = 0;

        if (cbf_luma)
            ff_hevc_hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0);
        /* Chroma residual exists at this level unless the four 4x4 luma
         * blocks of a non-4:4:4 CU share one chroma block (handled below
         * under blk_idx == 3). */
        if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
            int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
            int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
            lc->tu.cross_pf  = (s->ps.pps->cross_component_prediction_enabled_flag && cbf_luma &&
                                (lc->cu.pred_mode == MODE_INTER ||
                                 (lc->tu.chroma_mode_c == 4)));

            if (lc->tu.cross_pf) {
                hls_cross_component_pred(s, 0);
            }
            for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
                if (lc->cu.pred_mode == MODE_INTRA) {
                    ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
                    s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 1);
                }
                if (cbf_cb[i])
                    ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
                                                log2_trafo_size_c, scan_idx_c, 1);
                else
                    if (lc->tu.cross_pf) {
                        /* No coded Cb residual: derive it from the scaled
                         * luma residual (cross-component prediction).
                         * NOTE(review): the inner loop reuses the outer
                         * loop variable i; appears harmless as cross_pf is
                         * a 4:4:4 tool where the outer loop runs once —
                         * confirm against the PPS constraints. */
                        ptrdiff_t stride = s->frame->linesize[1];
                        int hshift = s->ps.sps->hshift[1];
                        int vshift = s->ps.sps->vshift[1];
                        int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
                        int16_t *coeffs   = (int16_t*)lc->edge_emu_buffer2;
                        int size = 1 << log2_trafo_size_c;

                        uint8_t *dst = &s->frame->data[1][(y0 >> vshift) * stride +
                                                          ((x0 >> hshift) << s->ps.sps->pixel_shift)];
                        for (i = 0; i < (size * size); i++) {
                            coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
                        }
                        s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
                    }
            }

            if (lc->tu.cross_pf) {
                hls_cross_component_pred(s, 1);
            }
            for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
                if (lc->cu.pred_mode == MODE_INTRA) {
                    ff_hevc_set_neighbour_available(s, x0, y0 + (i << log2_trafo_size_c), trafo_size_h, trafo_size_v);
                    s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (i << log2_trafo_size_c), 2);
                }
                if (cbf_cr[i])
                    ff_hevc_hls_residual_coding(s, x0, y0 + (i << log2_trafo_size_c),
                                                log2_trafo_size_c, scan_idx_c, 2);
                else
                    if (lc->tu.cross_pf) {
                        /* Same cross-component derivation for Cr. */
                        ptrdiff_t stride = s->frame->linesize[2];
                        int hshift = s->ps.sps->hshift[2];
                        int vshift = s->ps.sps->vshift[2];
                        int16_t *coeffs_y = (int16_t*)lc->edge_emu_buffer;
                        int16_t *coeffs   = (int16_t*)lc->edge_emu_buffer2;
                        int size = 1 << log2_trafo_size_c;

                        uint8_t *dst = &s->frame->data[2][(y0 >> vshift) * stride +
                                                          ((x0 >> hshift) << s->ps.sps->pixel_shift)];
                        for (i = 0; i < (size * size); i++) {
                            coeffs[i] = ((lc->tu.res_scale_val * coeffs_y[i]) >> 3);
                        }
                        s->hevcdsp.add_residual[log2_trafo_size_c-2](dst, coeffs, stride);
                    }
            }
        } else if (s->ps.sps->chroma_format_idc && blk_idx == 3) {
            /* 4x4 luma blocks: the shared chroma block is decoded once,
             * with the last (blk_idx == 3) child, at the parent position. */
            int trafo_size_h = 1 << (log2_trafo_size + 1);
            int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
            for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
                if (lc->cu.pred_mode == MODE_INTRA) {
                    ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
                                                    trafo_size_h, trafo_size_v);
                    s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 1);
                }
                if (cbf_cb[i])
                    ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
                                                log2_trafo_size, scan_idx_c, 1);
            }
            for (i = 0; i < (s->ps.sps->chroma_format_idc == 2 ? 2 : 1); i++) {
                if (lc->cu.pred_mode == MODE_INTRA) {
                    ff_hevc_set_neighbour_available(s, xBase, yBase + (i << log2_trafo_size),
                                                    trafo_size_h, trafo_size_v);
                    s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (i << log2_trafo_size), 2);
                }
                if (cbf_cr[i])
                    ff_hevc_hls_residual_coding(s, xBase, yBase + (i << log2_trafo_size),
                                                log2_trafo_size, scan_idx_c, 2);
            }
        }
    } else if (s->ps.sps->chroma_format_idc && lc->cu.pred_mode == MODE_INTRA) {
        /* No residual at all: still run chroma intra prediction. */
        if (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3) {
            int trafo_size_h = 1 << (log2_trafo_size_c + s->ps.sps->hshift[1]);
            int trafo_size_v = 1 << (log2_trafo_size_c + s->ps.sps->vshift[1]);
            ff_hevc_set_neighbour_available(s, x0, y0, trafo_size_h, trafo_size_v);
            s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 1);
            s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0, 2);
            if (s->ps.sps->chroma_format_idc == 2) {
                /* 4:2:2: second (lower) chroma block. */
                ff_hevc_set_neighbour_available(s, x0, y0 + (1 << log2_trafo_size_c),
                                                trafo_size_h, trafo_size_v);
                s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 1);
                s->hpc.intra_pred[log2_trafo_size_c - 2](s, x0, y0 + (1 << log2_trafo_size_c), 2);
            }
        } else if (blk_idx == 3) {
            int trafo_size_h = 1 << (log2_trafo_size + 1);
            int trafo_size_v = 1 << (log2_trafo_size + s->ps.sps->vshift[1]);
            ff_hevc_set_neighbour_available(s, xBase, yBase,
                                            trafo_size_h, trafo_size_v);
            s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1);
            s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2);
            if (s->ps.sps->chroma_format_idc == 2) {
                ff_hevc_set_neighbour_available(s, xBase, yBase + (1 << (log2_trafo_size)),
                                                trafo_size_h, trafo_size_v);
                s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 1);
                s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase + (1 << (log2_trafo_size)), 2);
            }
        }
    }

    return 0;
}
/* Mark every minimum-PU-sized cell covered by the coding block at
 * (x0, y0) as transquant-bypass (value 2) in s->is_pcm, clipping the
 * block against the picture boundaries. */
static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
{
    const int shift   = s->ps.sps->log2_min_pu_size;
    const int stride  = s->ps.sps->min_pu_width;
    const int cb_size = 1 << log2_cb_size;
    const int x_last  = FFMIN(x0 + cb_size, s->ps.sps->width)  >> shift;
    const int y_last  = FFMIN(y0 + cb_size, s->ps.sps->height) >> shift;

    for (int y = y0 >> shift; y < y_last; y++)
        for (int x = x0 >> shift; x < x_last; x++)
            s->is_pcm[y * stride + x] = 2;
}
/*
 * Recursively decode one transform-tree node: parse the split flag and
 * the chroma coded-block flags at this depth, then either recurse into
 * the four child transform blocks or decode the leaf transform unit.
 *
 * base_cbf_cb/base_cbf_cr carry the chroma CBFs inherited from the
 * parent depth (two entries each, for the second 4:2:2 chroma block).
 * Returns 0 on success or a negative AVERROR code.
 */
static int hls_transform_tree(HEVCContext *s, int x0, int y0,
                              int xBase, int yBase, int cb_xBase, int cb_yBase,
                              int log2_cb_size, int log2_trafo_size,
                              int trafo_depth, int blk_idx,
                              const int *base_cbf_cb, const int *base_cbf_cr)
{
    HEVCLocalContext *lc = s->HEVClc;
    uint8_t split_transform_flag;
    int cbf_cb[2];
    int cbf_cr[2];
    int ret;

    /* Local copies: children may refine the inherited flags. */
    cbf_cb[0] = base_cbf_cb[0];
    cbf_cb[1] = base_cbf_cb[1];
    cbf_cr[0] = base_cbf_cr[0];
    cbf_cr[1] = base_cbf_cr[1];

    /* Select the intra prediction modes for this node: with NxN intra
     * split, depth 1 picks the per-partition mode (chroma follows luma
     * only in 4:4:4); otherwise the CU-wide mode 0 applies. */
    if (lc->cu.intra_split_flag) {
        if (trafo_depth == 1) {
            lc->tu.intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
            if (s->ps.sps->chroma_format_idc == 3) {
                lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[blk_idx];
                lc->tu.chroma_mode_c     = lc->pu.chroma_mode_c[blk_idx];
            } else {
                lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
                lc->tu.chroma_mode_c     = lc->pu.chroma_mode_c[0];
            }
        }
    } else {
        lc->tu.intra_pred_mode   = lc->pu.intra_pred_mode[0];
        lc->tu.intra_pred_mode_c = lc->pu.intra_pred_mode_c[0];
        lc->tu.chroma_mode_c     = lc->pu.chroma_mode_c[0];
    }

    /* split_transform_flag is explicitly coded only when the size and
     * depth allow a choice; otherwise it is inferred. */
    if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
        log2_trafo_size >  s->ps.sps->log2_min_tb_size    &&
        trafo_depth     <  lc->cu.max_trafo_depth         &&
        !(lc->cu.intra_split_flag && trafo_depth == 0)) {
        split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size);
    } else {
        /* Inferred split: oversized TB, NxN intra split at depth 0, or an
         * inter CU with non-2Nx2N partitioning when the inter transform
         * hierarchy depth is 0. */
        int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
                          lc->cu.pred_mode == MODE_INTER &&
                          lc->cu.part_mode != PART_2Nx2N &&
                          trafo_depth == 0;

        split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
                               (lc->cu.intra_split_flag && trafo_depth == 0) ||
                               inter_split;
    }

    /* Chroma CBFs: present at depth 0 or while the parent flag is set;
     * 4:2:2 carries a second flag for the lower chroma block. */
    if (s->ps.sps->chroma_format_idc && (log2_trafo_size > 2 || s->ps.sps->chroma_format_idc == 3)) {
        if (trafo_depth == 0 || cbf_cb[0]) {
            cbf_cb[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
            if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
                cbf_cb[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
            }
        }

        if (trafo_depth == 0 || cbf_cr[0]) {
            cbf_cr[0] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
            if (s->ps.sps->chroma_format_idc == 2 && (!split_transform_flag || log2_trafo_size == 3)) {
                cbf_cr[1] = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
            }
        }
    }

    if (split_transform_flag) {
        /* Recurse into the four quadrants, propagating this node's
         * chroma CBFs. */
        const int trafo_size_split = 1 << (log2_trafo_size - 1);
        const int x1 = x0 + trafo_size_split;
        const int y1 = y0 + trafo_size_split;

#define SUBDIVIDE(x, y, idx)                                                    \
do {                                                                            \
    ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
                             log2_trafo_size - 1, trafo_depth + 1, idx,         \
                             cbf_cb, cbf_cr);                                   \
    if (ret < 0)                                                                \
        return ret;                                                             \
} while (0)

        SUBDIVIDE(x0, y0, 0);
        SUBDIVIDE(x1, y0, 1);
        SUBDIVIDE(x0, y1, 2);
        SUBDIVIDE(x1, y1, 3);

#undef SUBDIVIDE
    } else {
        /* Leaf: decode the transform unit itself. */
        int min_tu_size      = 1 << s->ps.sps->log2_min_tb_size;
        int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
        int min_tu_width     = s->ps.sps->min_tb_width;
        int cbf_luma         = 1;

        /* cbf_luma is explicitly coded unless it can be inferred to be 1
         * (inter CU at depth 0 with no chroma residual). */
        if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
            cbf_cb[0] || cbf_cr[0] ||
            (s->ps.sps->chroma_format_idc == 2 && (cbf_cb[1] || cbf_cr[1]))) {
            cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth);
        }

        ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
                                 log2_cb_size, log2_trafo_size,
                                 blk_idx, cbf_luma, cbf_cb, cbf_cr);
        if (ret < 0)
            return ret;
        // TODO: store cbf_luma somewhere else
        if (cbf_luma) {
            /* Record the luma CBF for every min-TU cell the TB covers
             * (used later by the deblocking filter). */
            int i, j;
            for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
                for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
                    int x_tu = (x0 + j) >> log2_min_tu_size;
                    int y_tu = (y0 + i) >> log2_min_tu_size;
                    s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
                }
        }
        if (!s->sh.disable_deblocking_filter_flag) {
            ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size);
            if (s->ps.pps->transquant_bypass_enable_flag &&
                lc->cu.cu_transquant_bypass_flag)
                set_deblocking_bypass(s, x0, y0, log2_trafo_size);
        }
    }
    return 0;
}
/*
 * Decode a PCM (raw, uncompressed) coding block: skip the PCM payload in
 * the CABAC byte stream and copy the raw luma and chroma samples
 * straight into the output frame via the put_pcm DSP functions.
 * Returns 0 on success or a negative error from init_get_bits().
 */
static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
{
    HEVCLocalContext *lc = s->HEVClc;
    GetBitContext gb;
    int cb_size   = 1 << log2_cb_size;
    ptrdiff_t stride0 = s->frame->linesize[0];
    ptrdiff_t stride1 = s->frame->linesize[1];
    ptrdiff_t stride2 = s->frame->linesize[2];
    /* Destination pointers into the three planes, accounting for chroma
     * subsampling and the per-sample byte width (pixel_shift). */
    uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
    uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
    uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];

    /* Total PCM payload size in bits: luma plus both (subsampled) chroma
     * planes at their respective PCM bit depths. */
    int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth +
                 (((cb_size >> s->ps.sps->hshift[1]) * (cb_size >> s->ps.sps->vshift[1])) +
                  ((cb_size >> s->ps.sps->hshift[2]) * (cb_size >> s->ps.sps->vshift[2]))) *
                 s->ps.sps->pcm.bit_depth_chroma;
    /* Advance the CABAC reader past the byte-aligned PCM data. */
    const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
    int ret;

    if (!s->sh.disable_deblocking_filter_flag)
        ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);

    ret = init_get_bits(&gb, pcm, length);
    if (ret < 0)
        return ret;

    s->hevcdsp.put_pcm(dst0, stride0, cb_size, cb_size, &gb, s->ps.sps->pcm.bit_depth);
    if (s->ps.sps->chroma_format_idc) {
        s->hevcdsp.put_pcm(dst1, stride1,
                           cb_size >> s->ps.sps->hshift[1],
                           cb_size >> s->ps.sps->vshift[1],
                           &gb, s->ps.sps->pcm.bit_depth_chroma);
        s->hevcdsp.put_pcm(dst2, stride2,
                           cb_size >> s->ps.sps->hshift[2],
                           cb_size >> s->ps.sps->vshift[2],
                           &gb, s->ps.sps->pcm.bit_depth_chroma);
    }

    return 0;
}
/**
 * 8.5.3.2.2.1 Luma sample unidirectional interpolation process
 *
 * @param s HEVC decoding context
 * @param dst target buffer for block data at block position
 * @param dststride stride of the dst buffer
 * @param ref reference picture buffer at origin (0, 0)
 * @param mv motion vector (relative to block position) to get pixel data from
 * @param x_off horizontal position of block from origin (0, 0)
 * @param y_off vertical position of block from origin (0, 0)
 * @param block_w width of block
 * @param block_h height of block
 * @param luma_weight weighting factor applied to the luma prediction
 * @param luma_offset additive offset applied to the luma prediction value
 */
static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
                        AVFrame *ref, const Mv *mv, int x_off, int y_off,
                        int block_w, int block_h, int luma_weight, int luma_offset)
{
    HEVCLocalContext *lc = s->HEVClc;
    uint8_t *src         = ref->data[0];
    ptrdiff_t srcstride  = ref->linesize[0];
    int pic_width        = s->ps.sps->width;
    int pic_height       = s->ps.sps->height;
    /* Quarter-pel fractional parts of the motion vector. */
    int mx               = mv->x & 3;
    int my               = mv->y & 3;
    /* Explicit weighted prediction applies per slice type. */
    int weight_flag      = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
                           (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
    int idx              = ff_hevc_pel_weight[block_w];

    /* Integer-pel displacement; pixel_shift accounts for >8-bit formats. */
    x_off += mv->x >> 2;
    y_off += mv->y >> 2;
    src   += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));

    /* If the interpolation filter footprint (QPEL_EXTRA samples around
     * the block) crosses a picture border, build a padded copy of the
     * source region in edge_emu_buffer and read from that instead. */
    if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER ||
        x_off >= pic_width  - block_w - QPEL_EXTRA_AFTER ||
        y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) {
        const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
        int offset     = QPEL_EXTRA_BEFORE * srcstride       + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
        int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);

        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
                                 edge_emu_stride, srcstride,
                                 block_w + QPEL_EXTRA,
                                 block_h + QPEL_EXTRA,
                                 x_off - QPEL_EXTRA_BEFORE, y_off - QPEL_EXTRA_BEFORE,
                                 pic_width, pic_height);
        src = lc->edge_emu_buffer + buf_offset;
        srcstride = edge_emu_stride;
    }

    if (!weight_flag)
        s->hevcdsp.put_hevc_qpel_uni[idx][!!my][!!mx](dst, dststride, src, srcstride,
                                                      block_h, mx, my, block_w);
    else
        s->hevcdsp.put_hevc_qpel_uni_w[idx][!!my][!!mx](dst, dststride, src, srcstride,
                                                        block_h, s->sh.luma_log2_weight_denom,
                                                        luma_weight, luma_offset, mx, my, block_w);
}
/**
 * 8.5.3.2.2.1 Luma sample bidirectional interpolation process
 *
 * @param s HEVC decoding context
 * @param dst target buffer for block data at block position
 * @param dststride stride of the dst buffer
 * @param ref0 reference picture0 buffer at origin (0, 0)
 * @param mv0 motion vector0 (relative to block position) to get pixel data from
 * @param x_off horizontal position of block from origin (0, 0)
 * @param y_off vertical position of block from origin (0, 0)
 * @param block_w width of block
 * @param block_h height of block
 * @param ref1 reference picture1 buffer at origin (0, 0)
 * @param mv1 motion vector1 (relative to block position) to get pixel data from
 * @param current_mv current motion vector structure
 */
static void luma_mc_bi(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride,
                       AVFrame *ref0, const Mv *mv0, int x_off, int y_off,
                       int block_w, int block_h, AVFrame *ref1, const Mv *mv1, struct MvField *current_mv)
{
    HEVCLocalContext *lc = s->HEVClc;
    ptrdiff_t src0stride = ref0->linesize[0];
    ptrdiff_t src1stride = ref1->linesize[0];
    int pic_width        = s->ps.sps->width;
    int pic_height       = s->ps.sps->height;
    /* Quarter-pel fractional parts of both motion vectors. */
    int mx0              = mv0->x & 3;
    int my0              = mv0->y & 3;
    int mx1              = mv1->x & 3;
    int my1              = mv1->y & 3;
    int weight_flag      = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
                           (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
    /* Integer-pel positions per reference. */
    int x_off0           = x_off + (mv0->x >> 2);
    int y_off0           = y_off + (mv0->y >> 2);
    int x_off1           = x_off + (mv1->x >> 2);
    int y_off1           = y_off + (mv1->y >> 2);
    int idx              = ff_hevc_pel_weight[block_w];

    uint8_t *src0 = ref0->data[0] + y_off0 * src0stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
    uint8_t *src1 = ref1->data[0] + y_off1 * src1stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);

    /* Edge emulation for reference 0 when the filter footprint crosses a
     * picture border (padded copy lives in edge_emu_buffer). */
    if (x_off0 < QPEL_EXTRA_BEFORE || y_off0 < QPEL_EXTRA_AFTER ||
        x_off0 >= pic_width  - block_w - QPEL_EXTRA_AFTER ||
        y_off0 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
        const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
        int offset     = QPEL_EXTRA_BEFORE * src0stride      + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
        int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);

        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset,
                                 edge_emu_stride, src0stride,
                                 block_w + QPEL_EXTRA,
                                 block_h + QPEL_EXTRA,
                                 x_off0 - QPEL_EXTRA_BEFORE, y_off0 - QPEL_EXTRA_BEFORE,
                                 pic_width, pic_height);
        src0 = lc->edge_emu_buffer + buf_offset;
        src0stride = edge_emu_stride;
    }

    /* Same for reference 1, using the second emulation buffer. */
    if (x_off1 < QPEL_EXTRA_BEFORE || y_off1 < QPEL_EXTRA_AFTER ||
        x_off1 >= pic_width  - block_w - QPEL_EXTRA_AFTER ||
        y_off1 >= pic_height - block_h - QPEL_EXTRA_AFTER) {
        const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
        int offset     = QPEL_EXTRA_BEFORE * src1stride      + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);
        int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift);

        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src1 - offset,
                                 edge_emu_stride, src1stride,
                                 block_w + QPEL_EXTRA,
                                 block_h + QPEL_EXTRA,
                                 x_off1 - QPEL_EXTRA_BEFORE, y_off1 - QPEL_EXTRA_BEFORE,
                                 pic_width, pic_height);
        src1 = lc->edge_emu_buffer2 + buf_offset;
        src1stride = edge_emu_stride;
    }

    /* First prediction into the intermediate buffer, then combine with
     * the second reference (plain or weighted bi-prediction). */
    s->hevcdsp.put_hevc_qpel[idx][!!my0][!!mx0](lc->tmp, src0, src0stride,
                                                block_h, mx0, my0, block_w);
    if (!weight_flag)
        s->hevcdsp.put_hevc_qpel_bi[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
                                                       block_h, mx1, my1, block_w);
    else
        s->hevcdsp.put_hevc_qpel_bi_w[idx][!!my1][!!mx1](dst, dststride, src1, src1stride, lc->tmp,
                                                         block_h, s->sh.luma_log2_weight_denom,
                                                         s->sh.luma_weight_l0[current_mv->ref_idx[0]],
                                                         s->sh.luma_weight_l1[current_mv->ref_idx[1]],
                                                         s->sh.luma_offset_l0[current_mv->ref_idx[0]],
                                                         s->sh.luma_offset_l1[current_mv->ref_idx[1]],
                                                         mx1, my1, block_w);
}
/**
 * 8.5.3.2.2.2 Chroma sample uniprediction interpolation process
 *
 * @param s HEVC decoding context
 * @param dst0 target buffer for block data at block position
 * @param dststride stride of the dst0 buffer
 * @param src0 source chroma plane of the reference picture at origin (0, 0)
 * @param srcstride stride of the src0 plane
 * @param reflist reference picture list the motion vector is taken from (0 or 1)
 * @param x_off horizontal position of block from origin (0, 0)
 * @param y_off vertical position of block from origin (0, 0)
 * @param block_w width of block
 * @param block_h height of block
 * @param current_mv motion vector structure holding the MV for reflist
 * @param chroma_weight weighting factor applied to the chroma prediction
 * @param chroma_offset additive offset applied to the chroma prediction value
 */
static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0,
                          ptrdiff_t dststride, uint8_t *src0, ptrdiff_t srcstride, int reflist,
                          int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int chroma_weight, int chroma_offset)
{
    HEVCLocalContext *lc = s->HEVClc;
    int pic_width        = s->ps.sps->width >> s->ps.sps->hshift[1];
    int pic_height       = s->ps.sps->height >> s->ps.sps->vshift[1];
    /* fix for mojibake: "&current_mv" had been mangled into the HTML
     * "&curren;" entity ("¤t_mv") by a past charset conversion */
    const Mv *mv         = &current_mv->mv[reflist];
    int weight_flag      = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
                           (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
    int idx              = ff_hevc_pel_weight[block_w];
    int hshift           = s->ps.sps->hshift[1];
    int vshift           = s->ps.sps->vshift[1];
    /* fractional-pel part of the chroma MV; chroma MV precision gains one
     * extra bit per subsampled dimension */
    intptr_t mx          = av_mod_uintp2(mv->x, 2 + hshift);
    intptr_t my          = av_mod_uintp2(mv->y, 2 + vshift);
    intptr_t _mx         = mx << (1 - hshift);
    intptr_t _my         = my << (1 - vshift);

    /* integer-pel displacement */
    x_off += mv->x >> (2 + hshift);
    y_off += mv->y >> (2 + vshift);
    src0  += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));

    /* if the interpolation window reaches outside the picture, copy the
     * block with edge replication into a temporary buffer first */
    if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
        x_off >= pic_width  - block_w - EPEL_EXTRA_AFTER ||
        y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
        const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
        int offset0     = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift));
        int buf_offset0 = EPEL_EXTRA_BEFORE *
                          (edge_emu_stride + (1 << s->ps.sps->pixel_shift));

        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src0 - offset0,
                                 edge_emu_stride, srcstride,
                                 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
                                 x_off - EPEL_EXTRA_BEFORE,
                                 y_off - EPEL_EXTRA_BEFORE,
                                 pic_width, pic_height);

        src0      = lc->edge_emu_buffer + buf_offset0;
        srcstride = edge_emu_stride;
    }

    if (!weight_flag)
        s->hevcdsp.put_hevc_epel_uni[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
                                                      block_h, _mx, _my, block_w);
    else
        s->hevcdsp.put_hevc_epel_uni_w[idx][!!my][!!mx](dst0, dststride, src0, srcstride,
                                                        block_h, s->sh.chroma_log2_weight_denom,
                                                        chroma_weight, chroma_offset, _mx, _my, block_w);
}
/**
 * 8.5.3.2.2.2 Chroma sample bidirectional interpolation process
 *
 * @param s HEVC decoding context
 * @param dst0 target buffer for block data at block position
 * @param dststride stride of the dst0 buffer
 * @param ref0 reference picture0 buffer at origin (0, 0)
 * @param ref1 reference picture1 buffer at origin (0, 0)
 * @param x_off horizontal position of block from origin (0, 0)
 * @param y_off vertical position of block from origin (0, 0)
 * @param block_w width of block
 * @param block_h height of block
 * @param current_mv current motion vector structure (holds both L0/L1 MVs)
 * @param cidx chroma component (0 = cb, 1 = cr)
 */
static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, ptrdiff_t dststride, AVFrame *ref0, AVFrame *ref1,
                         int x_off, int y_off, int block_w, int block_h, struct MvField *current_mv, int cidx)
{
    HEVCLocalContext *lc = s->HEVClc;
    uint8_t *src1        = ref0->data[cidx+1];
    uint8_t *src2        = ref1->data[cidx+1];
    ptrdiff_t src1stride = ref0->linesize[cidx+1];
    ptrdiff_t src2stride = ref1->linesize[cidx+1];
    int weight_flag      = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
                           (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
    int pic_width        = s->ps.sps->width >> s->ps.sps->hshift[1];
    int pic_height       = s->ps.sps->height >> s->ps.sps->vshift[1];
    /* fix for mojibake: "&current_mv" had been mangled into the HTML
     * "&curren;" entity ("¤t_mv") by a past charset conversion */
    Mv *mv0              = &current_mv->mv[0];
    Mv *mv1              = &current_mv->mv[1];
    int hshift           = s->ps.sps->hshift[1];
    int vshift           = s->ps.sps->vshift[1];
    /* fractional-pel parts of both motion vectors */
    intptr_t mx0         = av_mod_uintp2(mv0->x, 2 + hshift);
    intptr_t my0         = av_mod_uintp2(mv0->y, 2 + vshift);
    intptr_t mx1         = av_mod_uintp2(mv1->x, 2 + hshift);
    intptr_t my1         = av_mod_uintp2(mv1->y, 2 + vshift);
    intptr_t _mx0        = mx0 << (1 - hshift);
    intptr_t _my0        = my0 << (1 - vshift);
    intptr_t _mx1        = mx1 << (1 - hshift);
    intptr_t _my1        = my1 << (1 - vshift);
    /* integer-pel source positions for each list */
    int x_off0           = x_off + (mv0->x >> (2 + hshift));
    int y_off0           = y_off + (mv0->y >> (2 + vshift));
    int x_off1           = x_off + (mv1->x >> (2 + hshift));
    int y_off1           = y_off + (mv1->y >> (2 + vshift));
    int idx              = ff_hevc_pel_weight[block_w];

    src1 += y_off0 * src1stride + (int)((unsigned)x_off0 << s->ps.sps->pixel_shift);
    src2 += y_off1 * src2stride + (int)((unsigned)x_off1 << s->ps.sps->pixel_shift);

    /* edge emulation for the L0 source, if it crosses picture borders */
    if (x_off0 < EPEL_EXTRA_BEFORE || y_off0 < EPEL_EXTRA_AFTER ||
        x_off0 >= pic_width  - block_w - EPEL_EXTRA_AFTER ||
        y_off0 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
        const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
        int offset1     = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
        int buf_offset1 = EPEL_EXTRA_BEFORE *
                          (edge_emu_stride + (1 << s->ps.sps->pixel_shift));

        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
                                 edge_emu_stride, src1stride,
                                 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
                                 x_off0 - EPEL_EXTRA_BEFORE,
                                 y_off0 - EPEL_EXTRA_BEFORE,
                                 pic_width, pic_height);

        src1       = lc->edge_emu_buffer + buf_offset1;
        src1stride = edge_emu_stride;
    }

    /* edge emulation for the L1 source (uses the second emu buffer so the
     * first one stays valid) */
    if (x_off1 < EPEL_EXTRA_BEFORE || y_off1 < EPEL_EXTRA_AFTER ||
        x_off1 >= pic_width  - block_w - EPEL_EXTRA_AFTER ||
        y_off1 >= pic_height - block_h - EPEL_EXTRA_AFTER) {
        const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
        int offset1     = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
        int buf_offset1 = EPEL_EXTRA_BEFORE *
                          (edge_emu_stride + (1 << s->ps.sps->pixel_shift));

        s->vdsp.emulated_edge_mc(lc->edge_emu_buffer2, src2 - offset1,
                                 edge_emu_stride, src2stride,
                                 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
                                 x_off1 - EPEL_EXTRA_BEFORE,
                                 y_off1 - EPEL_EXTRA_BEFORE,
                                 pic_width, pic_height);

        src2       = lc->edge_emu_buffer2 + buf_offset1;
        src2stride = edge_emu_stride;
    }

    /* L0 prediction into the intermediate buffer, then combine with L1 */
    s->hevcdsp.put_hevc_epel[idx][!!my0][!!mx0](lc->tmp, src1, src1stride,
                                                block_h, _mx0, _my0, block_w);
    if (!weight_flag)
        s->hevcdsp.put_hevc_epel_bi[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
                                                       src2, src2stride, lc->tmp,
                                                       block_h, _mx1, _my1, block_w);
    else
        s->hevcdsp.put_hevc_epel_bi_w[idx][!!my1][!!mx1](dst0, s->frame->linesize[cidx+1],
                                                         src2, src2stride, lc->tmp,
                                                         block_h,
                                                         s->sh.chroma_log2_weight_denom,
                                                         s->sh.chroma_weight_l0[current_mv->ref_idx[0]][cidx],
                                                         s->sh.chroma_weight_l1[current_mv->ref_idx[1]][cidx],
                                                         s->sh.chroma_offset_l0[current_mv->ref_idx[0]][cidx],
                                                         s->sh.chroma_offset_l1[current_mv->ref_idx[1]][cidx],
                                                         _mx1, _my1, block_w);
}
/* Frame-threading helper: block until the reference frame has been decoded
 * at least down to the rows this prediction will read from it. */
static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref,
                                const Mv *mv, int y0, int height)
{
    if (s->threads_type != FF_THREAD_FRAME)
        return;

    /* bottom-most row needed: integer part of the quarter-pel MV plus the
     * block extent and a margin of 9 rows for the interpolation filter */
    int bottom = (mv->y >> 2) + y0 + height + 9;
    ff_thread_await_progress(&ref->tf, FFMAX(0, bottom), 0);
}
/* Decode the non-merge (AMVP) motion data of a prediction unit:
 * inter_pred_idc, per-list reference indices, MVDs and MVP candidate flags,
 * and write the resulting motion vectors / pred_flag into *mv.
 * NOTE: the order of the ff_hevc_*_decode() calls mirrors the bitstream
 * syntax and must not be changed. */
static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
                                  int nPbH, int log2_cb_size, int part_idx,
                                  int merge_idx, MvField *mv)
{
    HEVCLocalContext *lc = s->HEVClc;
    enum InterPredIdc inter_pred_idc = PRED_L0;
    int mvp_flag;

    ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
    mv->pred_flag = 0;
    /* inter_pred_idc is only signalled in B slices; P slices are L0-only */
    if (s->sh.slice_type == HEVC_SLICE_B)
        inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH);

    if (inter_pred_idc != PRED_L1) {
        /* list 0: ref index (if more than one ref), MVD, MVP flag, then
         * final MV = predictor + MVD */
        if (s->sh.nb_refs[L0])
            mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]);

        mv->pred_flag = PF_L0;
        ff_hevc_hls_mvd_coding(s, x0, y0, 0);
        mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
        ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
                                 part_idx, merge_idx, mv, mvp_flag, 0);
        mv->mv[0].x += lc->pu.mvd.x;
        mv->mv[0].y += lc->pu.mvd.y;
    }

    if (inter_pred_idc != PRED_L0) {
        if (s->sh.nb_refs[L1])
            mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]);

        /* mvd_l1_zero_flag forces a zero L1 MVD for bi-predicted PUs */
        if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
            AV_ZERO32(&lc->pu.mvd);
        } else {
            ff_hevc_hls_mvd_coding(s, x0, y0, 1);
        }

        mv->pred_flag += PF_L1;
        mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
        ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
                                 part_idx, merge_idx, mv, mvp_flag, 1);
        mv->mv[1].x += lc->pu.mvd.x;
        mv->mv[1].y += lc->pu.mvd.y;
    }
}
/* Parse motion data for one prediction unit (merge/skip or AMVP), store it
 * in the MV field table, and perform the luma + chroma motion compensation
 * for uni- (L0 or L1) or bi-prediction.
 * (x0, y0) is the PU position, nPbW x nPbH its size, idx indexes the
 * block-width-dependent DSP function tables. */
static void hls_prediction_unit(HEVCContext *s, int x0, int y0,
                                int nPbW, int nPbH,
                                int log2_cb_size, int partIdx, int idx)
{
#define POS(c_idx, x, y)                                                              \
    &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
                           (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
    HEVCLocalContext *lc = s->HEVClc;
    int merge_idx = 0;
    /* NOTE: several "&current_mv" tokens below had been mojibake'd into the
     * HTML "&curren;" entity ("¤t_mv"); all occurrences are repaired. */
    struct MvField current_mv = {{{ 0 }}};

    int min_pu_width = s->ps.sps->min_pu_width;

    MvField *tab_mvf = s->ref->tab_mvf;
    RefPicList  *refPicList = s->ref->refPicList;
    HEVCFrame *ref0 = NULL, *ref1 = NULL;
    uint8_t *dst0 = POS(0, x0, y0);
    uint8_t *dst1 = POS(1, x0, y0);
    uint8_t *dst2 = POS(2, x0, y0);
    int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
    int min_cb_width     = s->ps.sps->min_cb_width;
    int x_cb             = x0 >> log2_min_cb_size;
    int y_cb             = y0 >> log2_min_cb_size;
    int x_pu, y_pu;
    int i, j;

    int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);

    if (!skip_flag)
        lc->pu.merge_flag = ff_hevc_merge_flag_decode(s);

    if (skip_flag || lc->pu.merge_flag) {
        /* merge/skip: motion comes entirely from a merge candidate */
        if (s->sh.max_num_merge_cand > 1)
            merge_idx = ff_hevc_merge_idx_decode(s);
        else
            merge_idx = 0;

        ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
                                   partIdx, merge_idx, &current_mv);
    } else {
        hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
                              partIdx, merge_idx, &current_mv);
    }

    /* replicate the decoded motion info over every min-PU this PU covers */
    x_pu = x0 >> s->ps.sps->log2_min_pu_size;
    y_pu = y0 >> s->ps.sps->log2_min_pu_size;

    for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
        for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
            tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;

    if (current_mv.pred_flag & PF_L0) {
        ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
        if (!ref0)
            return;
        hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
    }
    if (current_mv.pred_flag & PF_L1) {
        ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
        if (!ref1)
            return;
        hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
    }

    if (current_mv.pred_flag == PF_L0) {
        /* uni-prediction from list 0 */
        int x0_c = x0 >> s->ps.sps->hshift[1];
        int y0_c = y0 >> s->ps.sps->vshift[1];
        int nPbW_c = nPbW >> s->ps.sps->hshift[1];
        int nPbH_c = nPbH >> s->ps.sps->vshift[1];

        luma_mc_uni(s, dst0, s->frame->linesize[0], ref0->frame,
                    &current_mv.mv[0], x0, y0, nPbW, nPbH,
                    s->sh.luma_weight_l0[current_mv.ref_idx[0]],
                    s->sh.luma_offset_l0[current_mv.ref_idx[0]]);

        if (s->ps.sps->chroma_format_idc) {
            chroma_mc_uni(s, dst1, s->frame->linesize[1], ref0->frame->data[1], ref0->frame->linesize[1],
                          0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
                          s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0]);
            chroma_mc_uni(s, dst2, s->frame->linesize[2], ref0->frame->data[2], ref0->frame->linesize[2],
                          0, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
                          s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1], s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1]);
        }
    } else if (current_mv.pred_flag == PF_L1) {
        /* uni-prediction from list 1 */
        int x0_c = x0 >> s->ps.sps->hshift[1];
        int y0_c = y0 >> s->ps.sps->vshift[1];
        int nPbW_c = nPbW >> s->ps.sps->hshift[1];
        int nPbH_c = nPbH >> s->ps.sps->vshift[1];

        luma_mc_uni(s, dst0, s->frame->linesize[0], ref1->frame,
                    &current_mv.mv[1], x0, y0, nPbW, nPbH,
                    s->sh.luma_weight_l1[current_mv.ref_idx[1]],
                    s->sh.luma_offset_l1[current_mv.ref_idx[1]]);

        if (s->ps.sps->chroma_format_idc) {
            chroma_mc_uni(s, dst1, s->frame->linesize[1], ref1->frame->data[1], ref1->frame->linesize[1],
                          1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
                          s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0]);
            chroma_mc_uni(s, dst2, s->frame->linesize[2], ref1->frame->data[2], ref1->frame->linesize[2],
                          1, x0_c, y0_c, nPbW_c, nPbH_c, &current_mv,
                          s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1], s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1]);
        }
    } else if (current_mv.pred_flag == PF_BI) {
        /* bi-prediction: average/weight both lists */
        int x0_c = x0 >> s->ps.sps->hshift[1];
        int y0_c = y0 >> s->ps.sps->vshift[1];
        int nPbW_c = nPbW >> s->ps.sps->hshift[1];
        int nPbH_c = nPbH >> s->ps.sps->vshift[1];

        luma_mc_bi(s, dst0, s->frame->linesize[0], ref0->frame,
                   &current_mv.mv[0], x0, y0, nPbW, nPbH,
                   ref1->frame, &current_mv.mv[1], &current_mv);

        if (s->ps.sps->chroma_format_idc) {
            chroma_mc_bi(s, dst1, s->frame->linesize[1], ref0->frame, ref1->frame,
                         x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 0);

            chroma_mc_bi(s, dst2, s->frame->linesize[2], ref0->frame, ref1->frame,
                         x0_c, y0_c, nPbW_c, nPbH_c, &current_mv, 1);
        }
    }
}
/**
 * 8.4.1 Derivation process for luma intra prediction mode
 */
/* Derive the luma intra prediction mode of a PU from its left/up neighbours
 * (the three "most probable mode" candidates), select the final mode from
 * either mpm_idx or rem_intra_luma_pred_mode, and record it — together with
 * PF_INTRA markers — in the per-PU tables. Returns the selected mode. */
static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size,
                                int prev_intra_luma_pred_flag)
{
    HEVCLocalContext *lc = s->HEVClc;
    int x_pu             = x0 >> s->ps.sps->log2_min_pu_size;
    int y_pu             = y0 >> s->ps.sps->log2_min_pu_size;
    int min_pu_width     = s->ps.sps->min_pu_width;
    int size_in_pus      = pu_size >> s->ps.sps->log2_min_pu_size;
    int x0b              = av_mod_uintp2(x0, s->ps.sps->log2_ctb_size);
    int y0b              = av_mod_uintp2(y0, s->ps.sps->log2_ctb_size);

    /* neighbour modes default to DC when the neighbour is unavailable */
    int cand_up   = (lc->ctb_up_flag || y0b) ?
                    s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
    int cand_left = (lc->ctb_left_flag || x0b) ?
                    s->tab_ipm[y_pu * min_pu_width + x_pu - 1]   : INTRA_DC;

    int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);

    MvField *tab_mvf = s->ref->tab_mvf;
    int intra_pred_mode;
    int candidate[3];
    int i, j;

    // intra_pred_mode prediction does not cross vertical CTB boundaries
    if ((y0 - 1) < y_ctb)
        cand_up = INTRA_DC;

    /* build the 3-entry MPM candidate list */
    if (cand_left == cand_up) {
        if (cand_left < 2) {
            /* both non-angular: PLANAR, DC, vertical */
            candidate[0] = INTRA_PLANAR;
            candidate[1] = INTRA_DC;
            candidate[2] = INTRA_ANGULAR_26;
        } else {
            /* both the same angular mode: it plus its two angular neighbours
             * (index arithmetic wraps within the 32 angular modes) */
            candidate[0] = cand_left;
            candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
            candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
        }
    } else {
        candidate[0] = cand_left;
        candidate[1] = cand_up;
        if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
            candidate[2] = INTRA_PLANAR;
        } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
            candidate[2] = INTRA_DC;
        } else {
            candidate[2] = INTRA_ANGULAR_26;
        }
    }

    if (prev_intra_luma_pred_flag) {
        intra_pred_mode = candidate[lc->pu.mpm_idx];
    } else {
        /* rem_intra_luma_pred_mode indexes the modes NOT in the (sorted)
         * candidate list, so bump it past each candidate it reaches */
        if (candidate[0] > candidate[1])
            FFSWAP(uint8_t, candidate[0], candidate[1]);
        if (candidate[0] > candidate[2])
            FFSWAP(uint8_t, candidate[0], candidate[2]);
        if (candidate[1] > candidate[2])
            FFSWAP(uint8_t, candidate[1], candidate[2]);

        intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
        for (i = 0; i < 3; i++)
            if (intra_pred_mode >= candidate[i])
                intra_pred_mode++;
    }

    /* write the intra prediction units into the mv array */
    if (!size_in_pus)
        size_in_pus = 1;
    for (i = 0; i < size_in_pus; i++) {
        memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
               intra_pred_mode, size_in_pus);
        for (j = 0; j < size_in_pus; j++) {
            tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag = PF_INTRA;
        }
    }

    return intra_pred_mode;
}
/* Record the coding-tree depth of this CB in every min-CB cell it covers;
 * the table is consulted when deriving CABAC contexts for split flags of
 * neighbouring blocks. */
static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0,
                                          int log2_cb_size, int ct_depth)
{
    const int span = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
    const int cb_x = x0 >> s->ps.sps->log2_min_cb_size;
    const int cb_y = y0 >> s->ps.sps->log2_min_cb_size;
    int row;

    for (row = 0; row < span; row++)
        memset(&s->tab_ct_depth[(cb_y + row) * s->ps.sps->min_cb_width + cb_x],
               ct_depth, span);
}
/* Intra mode remapping table used for 4:2:2 chroma (chroma_format_idc == 2):
 * maps a derived luma-domain intra mode index to the chroma mode actually
 * used, see its single use in intra_prediction_unit(). */
static const uint8_t tab_mode_idx[] = {
     0,  1,  2,  2,  2,  2,  3,  5,  7,  8, 10, 12, 13, 15, 17, 18, 19, 20,
    21, 22, 23, 23, 24, 24, 25, 25, 26, 27, 27, 28, 28, 29, 29, 30, 31};
/* Parse intra prediction data for a CU: one luma mode per PU (1 PU for
 * 2Nx2N, 4 for NxN), then the chroma mode(s), whose derivation depends on
 * chroma_format_idc. The two passes over prev_intra_luma_pred_flag mirror
 * the bitstream syntax order and must be kept separate. */
static void intra_prediction_unit(HEVCContext *s, int x0, int y0,
                                  int log2_cb_size)
{
    HEVCLocalContext *lc = s->HEVClc;
    /* chroma_mode != 4 selects one of these fixed modes (mode 4 means
     * "same as luma") */
    static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
    uint8_t prev_intra_luma_pred_flag[4];
    int split   = lc->cu.part_mode == PART_NxN;
    int pb_size = (1 << log2_cb_size) >> split;
    int side    = split + 1;
    int chroma_mode;
    int i, j;

    /* pass 1: all prev_intra_luma_pred_flags (syntax order) */
    for (i = 0; i < side; i++)
        for (j = 0; j < side; j++)
            prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s);

    /* pass 2: per PU, mpm_idx or rem mode, then derive the luma mode */
    for (i = 0; i < side; i++) {
        for (j = 0; j < side; j++) {
            if (prev_intra_luma_pred_flag[2 * i + j])
                lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(s);
            else
                lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(s);

            lc->pu.intra_pred_mode[2 * i + j] =
                luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size,
                                     prev_intra_luma_pred_flag[2 * i + j]);
        }
    }

    if (s->ps.sps->chroma_format_idc == 3) {
        /* 4:4:4: a chroma mode per PU; collision with the fixed table entry
         * maps to angular mode 34 */
        for (i = 0; i < side; i++) {
            for (j = 0; j < side; j++) {
                lc->pu.chroma_mode_c[2 * i + j] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
                if (chroma_mode != 4) {
                    if (lc->pu.intra_pred_mode[2 * i + j] == intra_chroma_table[chroma_mode])
                        lc->pu.intra_pred_mode_c[2 * i + j] = 34;
                    else
                        lc->pu.intra_pred_mode_c[2 * i + j] = intra_chroma_table[chroma_mode];
                } else {
                    lc->pu.intra_pred_mode_c[2 * i + j] = lc->pu.intra_pred_mode[2 * i + j];
                }
            }
        }
    } else if (s->ps.sps->chroma_format_idc == 2) {
        /* 4:2:2: single chroma mode, remapped through tab_mode_idx */
        int mode_idx;
        lc->pu.chroma_mode_c[0] = chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
        if (chroma_mode != 4) {
            if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
                mode_idx = 34;
            else
                mode_idx = intra_chroma_table[chroma_mode];
        } else {
            mode_idx = lc->pu.intra_pred_mode[0];
        }
        lc->pu.intra_pred_mode_c[0] = tab_mode_idx[mode_idx];
    } else if (s->ps.sps->chroma_format_idc != 0) {
        /* 4:2:0: single chroma mode, no remap */
        chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
        if (chroma_mode != 4) {
            if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
                lc->pu.intra_pred_mode_c[0] = 34;
            else
                lc->pu.intra_pred_mode_c[0] = intra_chroma_table[chroma_mode];
        } else {
            lc->pu.intra_pred_mode_c[0] = lc->pu.intra_pred_mode[0];
        }
    }
}
/* Initialize the per-PU tables covered by this CU to safe defaults:
 * intra mode DC everywhere, and — for intra CUs — pred_flag = PF_INTRA in
 * the motion-vector field. */
static void intra_prediction_unit_default_value(HEVCContext *s,
                                                int x0, int y0,
                                                int log2_cb_size)
{
    HEVCLocalContext *lc = s->HEVClc;
    MvField *tab_mvf     = s->ref->tab_mvf;
    int min_pu_width     = s->ps.sps->min_pu_width;
    int x_pu             = x0 >> s->ps.sps->log2_min_pu_size;
    int y_pu             = y0 >> s->ps.sps->log2_min_pu_size;
    int size_in_pus      = (1 << log2_cb_size) >> s->ps.sps->log2_min_pu_size;
    int row, col;

    /* a CB smaller than the min-PU grid still covers one table entry */
    if (size_in_pus == 0)
        size_in_pus = 1;

    for (row = 0; row < size_in_pus; row++)
        memset(&s->tab_ipm[(y_pu + row) * min_pu_width + x_pu],
               INTRA_DC, size_in_pus);

    if (lc->cu.pred_mode == MODE_INTRA)
        for (row = 0; row < size_in_pus; row++)
            for (col = 0; col < size_in_pus; col++)
                tab_mvf[(y_pu + row) * min_pu_width + x_pu + col].pred_flag = PF_INTRA;
}
/* Parse and reconstruct one coding unit: skip flag, prediction mode and
 * partitioning, intra/inter prediction data or PCM samples, the residual
 * transform tree, then QP and coding-tree-depth bookkeeping.
 * Returns 0 on success, a negative error code on failure.
 * NOTE: the decode-call order follows the bitstream syntax exactly. */
static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
{
    int cb_size          = 1 << log2_cb_size;
    HEVCLocalContext *lc = s->HEVClc;
    int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
    int length           = cb_size >> log2_min_cb_size;
    int min_cb_width     = s->ps.sps->min_cb_width;
    int x_cb             = x0 >> log2_min_cb_size;
    int y_cb             = y0 >> log2_min_cb_size;
    int idx              = log2_cb_size - 2;
    /* mask for detecting QP-prediction-group boundaries */
    int qp_block_mask    = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
    int x, y, ret;

    lc->cu.x                = x0;
    lc->cu.y                = y0;
    lc->cu.pred_mode        = MODE_INTRA;
    lc->cu.part_mode        = PART_2Nx2N;
    lc->cu.intra_split_flag = 0;

    SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
    for (x = 0; x < 4; x++)
        lc->pu.intra_pred_mode[x] = 1;
    if (s->ps.pps->transquant_bypass_enable_flag) {
        lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s);
        if (lc->cu.cu_transquant_bypass_flag)
            set_deblocking_bypass(s, x0, y0, log2_cb_size);
    } else
        lc->cu.cu_transquant_bypass_flag = 0;

    if (s->sh.slice_type != HEVC_SLICE_I) {
        /* decode cu_skip_flag and spread it over all covered min-CB cells */
        uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb);

        x = y_cb * min_cb_width + x_cb;
        for (y = 0; y < length; y++) {
            memset(&s->skip_flag[x], skip_flag, length);
            x += min_cb_width;
        }
        lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
    } else {
        x = y_cb * min_cb_width + x_cb;
        for (y = 0; y < length; y++) {
            memset(&s->skip_flag[x], 0, length);
            x += min_cb_width;
        }
    }

    if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
        /* skipped CU: single 2Nx2N merge PU, no residual */
        hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
        intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);

        if (!s->sh.disable_deblocking_filter_flag)
            ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
    } else {
        int pcm_flag = 0;

        if (s->sh.slice_type != HEVC_SLICE_I)
            lc->cu.pred_mode = ff_hevc_pred_mode_decode(s);
        /* part_mode is only coded when it may differ from 2Nx2N */
        if (lc->cu.pred_mode != MODE_INTRA ||
            log2_cb_size == s->ps.sps->log2_min_cb_size) {
            lc->cu.part_mode        = ff_hevc_part_mode_decode(s, log2_cb_size);
            lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
                                      lc->cu.pred_mode == MODE_INTRA;
        }

        if (lc->cu.pred_mode == MODE_INTRA) {
            /* PCM is only allowed for 2Nx2N intra CUs within the SPS size
             * bounds */
            if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
                log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
                log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
                pcm_flag = ff_hevc_pcm_flag_decode(s);
            }
            if (pcm_flag) {
                intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
                ret = hls_pcm_sample(s, x0, y0, log2_cb_size);
                if (s->ps.sps->pcm.loop_filter_disable_flag)
                    set_deblocking_bypass(s, x0, y0, log2_cb_size);

                if (ret < 0)
                    return ret;
            } else {
                intra_prediction_unit(s, x0, y0, log2_cb_size);
            }
        } else {
            intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
            /* one hls_prediction_unit() call per PU of the chosen
             * partitioning; idx is adjusted for the reduced PU width */
            switch (lc->cu.part_mode) {
            case PART_2Nx2N:
                hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0, idx);
                break;
            case PART_2NxN:
                hls_prediction_unit(s, x0, y0,               cb_size, cb_size / 2, log2_cb_size, 0, idx);
                hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1, idx);
                break;
            case PART_Nx2N:
                hls_prediction_unit(s, x0,               y0, cb_size / 2, cb_size, log2_cb_size, 0, idx - 1);
                hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1, idx - 1);
                break;
            case PART_2NxnU:
                hls_prediction_unit(s, x0, y0,               cb_size, cb_size     / 4, log2_cb_size, 0, idx);
                hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1, idx);
                break;
            case PART_2NxnD:
                hls_prediction_unit(s, x0, y0,                   cb_size, cb_size * 3 / 4, log2_cb_size, 0, idx);
                hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size     / 4, log2_cb_size, 1, idx);
                break;
            case PART_nLx2N:
                hls_prediction_unit(s, x0,               y0, cb_size     / 4, cb_size, log2_cb_size, 0, idx - 2);
                hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1, idx - 2);
                break;
            case PART_nRx2N:
                hls_prediction_unit(s, x0,                   y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0, idx - 2);
                hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size     / 4, cb_size, log2_cb_size, 1, idx - 2);
                break;
            case PART_NxN:
                hls_prediction_unit(s, x0,               y0,               cb_size / 2, cb_size / 2, log2_cb_size, 0, idx - 1);
                hls_prediction_unit(s, x0 + cb_size / 2, y0,               cb_size / 2, cb_size / 2, log2_cb_size, 1, idx - 1);
                hls_prediction_unit(s, x0,               y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2, idx - 1);
                hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3, idx - 1);
                break;
            }
        }

        if (!pcm_flag) {
            int rqt_root_cbf = 1;

            /* rqt_root_cbf may signal "no residual" for merged inter CUs */
            if (lc->cu.pred_mode != MODE_INTRA &&
                !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
                rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s);
            }
            if (rqt_root_cbf) {
                const static int cbf[2] = { 0 };
                lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
                                         s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
                                         s->ps.sps->max_transform_hierarchy_depth_inter;
                ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0,
                                         log2_cb_size,
                                         log2_cb_size, 0, 0, cbf, cbf);
                if (ret < 0)
                    return ret;
            } else {
                if (!s->sh.disable_deblocking_filter_flag)
                    ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
            }
        }
    }

    if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
        ff_hevc_set_qPy(s, x0, y0, log2_cb_size);

    /* record the final luma QP for every covered min-CB (used by the
     * deblocking filter) */
    x = y_cb * min_cb_width + x_cb;
    for (y = 0; y < length; y++) {
        memset(&s->qp_y_tab[x], lc->qp_y, length);
        x += min_cb_width;
    }

    /* update the QP predictor when a QP-group boundary is crossed */
    if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
       ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0) {
        lc->qPy_pred = lc->qp_y;
    }

    set_ct_depth(s, x0, y0, log2_cb_size, lc->ct_depth);

    return 0;
}
/* Recursively parse the coding quadtree rooted at (x0, y0). Each node is
 * either split into four children (possibly clipped at picture borders) or
 * decoded as a coding unit.
 * Returns: negative error code on failure, 0 when the slice data ends here,
 * 1 when more CTB data follows. */
static int hls_coding_quadtree(HEVCContext *s, int x0, int y0,
                               int log2_cb_size, int cb_depth)
{
    HEVCLocalContext *lc = s->HEVClc;
    const int cb_size    = 1 << log2_cb_size;
    int ret;
    int split_cu;

    lc->ct_depth = cb_depth;
    /* split flag is only coded when the CB fits entirely inside the picture
     * and is larger than the minimum size; otherwise splitting is forced */
    if (x0 + cb_size <= s->ps.sps->width  &&
        y0 + cb_size <= s->ps.sps->height &&
        log2_cb_size > s->ps.sps->log2_min_cb_size) {
        split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0);
    } else {
        split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
    }
    /* entering a new QP group: reset delta-QP bookkeeping */
    if (s->ps.pps->cu_qp_delta_enabled_flag &&
        log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
        lc->tu.is_cu_qp_delta_coded = 0;
        lc->tu.cu_qp_delta          = 0;
    }

    if (s->sh.cu_chroma_qp_offset_enabled_flag &&
        log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_chroma_qp_offset_depth) {
        lc->tu.is_cu_chroma_qp_offset_coded = 0;
    }

    if (split_cu) {
        int qp_block_mask = (1<<(s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth)) - 1;
        const int cb_size_split = cb_size >> 1;
        const int x1 = x0 + cb_size_split;
        const int y1 = y0 + cb_size_split;

        int more_data = 0;

        /* recurse into the (up to) four quadrants; quadrants fully outside
         * the picture are skipped */
        more_data = hls_coding_quadtree(s, x0, y0, log2_cb_size - 1, cb_depth + 1);
        if (more_data < 0)
            return more_data;

        if (more_data && x1 < s->ps.sps->width) {
            more_data = hls_coding_quadtree(s, x1, y0, log2_cb_size - 1, cb_depth + 1);
            if (more_data < 0)
                return more_data;
        }
        if (more_data && y1 < s->ps.sps->height) {
            more_data = hls_coding_quadtree(s, x0, y1, log2_cb_size - 1, cb_depth + 1);
            if (more_data < 0)
                return more_data;
        }
        if (more_data && x1 < s->ps.sps->width &&
            y1 < s->ps.sps->height) {
            more_data = hls_coding_quadtree(s, x1, y1, log2_cb_size - 1, cb_depth + 1);
            if (more_data < 0)
                return more_data;
        }

        /* leaving a QP group: refresh the QP predictor */
        if(((x0 + (1<<log2_cb_size)) & qp_block_mask) == 0 &&
           ((y0 + (1<<log2_cb_size)) & qp_block_mask) == 0)
            lc->qPy_pred = lc->qp_y;

        if (more_data)
            return ((x1 + cb_size_split) < s->ps.sps->width ||
                    (y1 + cb_size_split) < s->ps.sps->height);
        else
            return 0;
    } else {
        /* leaf: decode the coding unit itself */
        ret = hls_coding_unit(s, x0, y0, log2_cb_size);
        if (ret < 0)
            return ret;
        /* end_of_slice_segment_flag is only present after the last CB of a
         * CTB (CTB-aligned or clipped at the picture border) */
        if ((!((x0 + cb_size) %
               (1 << (s->ps.sps->log2_ctb_size))) ||
             (x0 + cb_size >= s->ps.sps->width)) &&
            (!((y0 + cb_size) %
               (1 << (s->ps.sps->log2_ctb_size))) ||
             (y0 + cb_size >= s->ps.sps->height))) {
            int end_of_slice_flag = ff_hevc_end_of_slice_flag_decode(s);
            return !end_of_slice_flag;
        } else {
            return 1;
        }
    }

    return 0;   /* not reached; both branches above return */
}
/* Set up per-CTB neighbourhood state before decoding the CTB at
 * (x_ctb, y_ctb): tile/slice boundary flags, end-of-tile coordinates, and
 * the ctb_left/up/up-left/up-right availability flags used by intra
 * prediction and CABAC context derivation. */
static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
                                 int ctb_addr_ts)
{
    HEVCLocalContext *lc  = s->HEVClc;
    int ctb_size          = 1 << s->ps.sps->log2_ctb_size;
    int ctb_addr_rs       = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
    int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;

    s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;

    if (s->ps.pps->entropy_coding_sync_enabled_flag) {
        /* WPP: each CTB row starts a new QP group */
        if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
            lc->first_qp_group = 1;
        lc->end_of_tiles_x = s->ps.sps->width;
    } else if (s->ps.pps->tiles_enabled_flag) {
        /* recompute the tile's right edge when we enter a new tile */
        if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
            int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
            lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
            lc->first_qp_group   = 1;
        }
    } else {
        lc->end_of_tiles_x = s->ps.sps->width;
    }

    lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);

    lc->boundary_flags = 0;
    if (s->ps.pps->tiles_enabled_flag) {
        /* a boundary is a tile boundary when the neighbouring CTB belongs to
         * a different tile, a slice boundary when it belongs to a different
         * slice */
        if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
            lc->boundary_flags |= BOUNDARY_LEFT_TILE;
        if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
            lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
        if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
            lc->boundary_flags |= BOUNDARY_UPPER_TILE;
        if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
            lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
    } else {
        if (ctb_addr_in_slice <= 0)
            lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
        if (ctb_addr_in_slice < s->ps.sps->ctb_width)
            lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
    }

    /* neighbour availability: present in the picture, in the same slice
     * segment, and not across a tile boundary */
    lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
    lc->ctb_up_flag   = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
    lc->ctb_up_right_flag = ((y_ctb > 0)  && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
    lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0)  && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
}
/* Single-threaded slice decoding entry point (invoked via avctx->execute).
 * Walks the CTBs of the slice in tile-scan order, initializing CABAC and
 * neighbourhood state per CTB, decoding each coding quadtree and running
 * the in-loop filters behind the decoding front.
 * Returns the number of decoded CTBs (tile-scan address after the slice) or
 * a negative error code. */
static int hls_decode_entry(AVCodecContext *avctxt, void *isFilterThread)
{
    HEVCContext *s  = avctxt->priv_data;
    int ctb_size    = 1 << s->ps.sps->log2_ctb_size;
    int more_data   = 1;
    int x_ctb       = 0;
    int y_ctb       = 0;
    int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
    int ret;

    /* a dependent slice segment cannot be the first segment of a picture */
    if (!ctb_addr_ts && s->sh.dependent_slice_segment_flag) {
        av_log(s->avctx, AV_LOG_ERROR, "Impossible initial tile.\n");
        return AVERROR_INVALIDDATA;
    }

    /* a dependent segment needs its predecessor's state to be present */
    if (s->sh.dependent_slice_segment_flag) {
        int prev_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts - 1];
        if (s->tab_slice_address[prev_rs] != s->sh.slice_addr) {
            av_log(s->avctx, AV_LOG_ERROR, "Previous slice segment missing\n");
            return AVERROR_INVALIDDATA;
        }
    }

    while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
        int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];

        x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
        y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
        hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);

        ret = ff_hevc_cabac_init(s, ctb_addr_ts);
        if (ret < 0) {
            /* mark this CTB as undecoded so later segments detect the gap */
            s->tab_slice_address[ctb_addr_rs] = -1;
            return ret;
        }

        hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);

        s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
        s->deblock[ctb_addr_rs].tc_offset   = s->sh.tc_offset;
        s->filter_slice_edges[ctb_addr_rs]  = s->sh.slice_loop_filter_across_slices_enabled_flag;

        more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
        if (more_data < 0) {
            s->tab_slice_address[ctb_addr_rs] = -1;
            return more_data;
        }


        ctb_addr_ts++;
        /* checkpoint CABAC state (needed for WPP/tiles continuation) and
         * run filters for the CTBs that are now fully decodable */
        ff_hevc_save_states(s, ctb_addr_ts);
        ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
    }

    /* filter the last CTB once the bottom-right of the picture is reached */
    if (x_ctb + ctb_size >= s->ps.sps->width &&
        y_ctb + ctb_size >= s->ps.sps->height)
        ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);

    return ctb_addr_ts;
}
/* Decode the current slice without wavefront parallelism: dispatch a single
 * hls_decode_entry() job through the generic execute() mechanism and hand
 * back its return value. */
static int hls_slice_data(HEVCContext *s)
{
    int job_args[2] = { 0, 1 };
    int job_ret[2]  = { 0, 0 };

    s->avctx->execute(s->avctx, hls_decode_entry, job_args, job_ret, 1, sizeof(int));
    return job_ret[0];
}
/* Wavefront-parallel-processing (WPP) worker: decode one CTB row.
 *
 * input_ctb_row[job] selects the row; sList[self_id] supplies the per-thread
 * decoder context.  Rows synchronize with each other through
 * ff_thread_await_progress2()/ff_thread_report_progress2(): a row only
 * advances once the row above it is sufficiently far ahead (SHIFT_CTB_WPP).
 * On error the shared s1->wpp_err flag is raised so the other rows abort.
 *
 * Returns the final tile-scan CTB address on completing the picture,
 * 0 on a clean early exit, or a negative AVERROR code. */
static int hls_decode_entry_wpp(AVCodecContext *avctxt, void *input_ctb_row, int job, int self_id)
{
    HEVCContext *s1 = avctxt->priv_data, *s;
    HEVCLocalContext *lc;
    int ctb_size = 1<< s1->ps.sps->log2_ctb_size;
    int more_data = 1;
    int *ctb_row_p = input_ctb_row;
    int ctb_row = ctb_row_p[job];
    /* raster-scan address of this row's first CTB, derived from the slice
     * start address plus ctb_row whole picture-width rows of CTBs */
    int ctb_addr_rs = s1->sh.slice_ctb_addr_rs + ctb_row * ((s1->ps.sps->width + ctb_size - 1) >> s1->ps.sps->log2_ctb_size);
    int ctb_addr_ts = s1->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
    int thread = ctb_row % s1->threads_number;
    int ret;

    s = s1->sList[self_id];
    lc = s->HEVClc;

    if(ctb_row) {
        /* rows after the first start at their slice-header entry point */
        ret = init_get_bits8(&lc->gb, s->data + s->sh.offset[ctb_row - 1], s->sh.size[ctb_row - 1]);
        if (ret < 0)
            goto error;
        ff_init_cabac_decoder(&lc->cc, s->data + s->sh.offset[(ctb_row)-1], s->sh.size[ctb_row - 1]);
    }

    while(more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
        int x_ctb = (ctb_addr_rs % s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;
        int y_ctb = (ctb_addr_rs / s->ps.sps->ctb_width) << s->ps.sps->log2_ctb_size;

        hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);

        /* block until the row above has decoded enough CTBs for our
         * prediction / CABAC-context dependencies */
        ff_thread_await_progress2(s->avctx, ctb_row, thread, SHIFT_CTB_WPP);

        /* another row already failed: unblock waiters and bail quietly */
        if (atomic_load(&s1->wpp_err)) {
            ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
            return 0;
        }

        ret = ff_hevc_cabac_init(s, ctb_addr_ts);
        if (ret < 0)
            goto error;
        hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
        more_data = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);

        if (more_data < 0) {
            ret = more_data;
            goto error;
        }

        ctb_addr_ts++;

        /* export CABAC state so the dependent row can resume from it */
        ff_hevc_save_states(s, ctb_addr_ts);
        ff_thread_report_progress2(s->avctx, ctb_row, thread, 1);
        ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);

        /* end_of_slice appeared before the row was finished: the stream is
         * inconsistent, so flag the error for all other rows */
        if (!more_data && (x_ctb+ctb_size) < s->ps.sps->width && ctb_row != s->sh.num_entry_point_offsets) {
            atomic_store(&s1->wpp_err, 1);
            ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
            return 0;
        }

        /* last CTB of the picture: finish in-loop filtering and return */
        if ((x_ctb+ctb_size) >= s->ps.sps->width && (y_ctb+ctb_size) >= s->ps.sps->height ) {
            ff_hevc_hls_filter(s, x_ctb, y_ctb, ctb_size);
            ff_thread_report_progress2(s->avctx, ctb_row , thread, SHIFT_CTB_WPP);
            return ctb_addr_ts;
        }
        ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
        x_ctb+=ctb_size;

        /* reached the right picture edge: this row is done */
        if(x_ctb >= s->ps.sps->width) {
            break;
        }
    }
    ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);

    return 0;

error:
    /* mark the failing CTB as not belonging to any slice, then wake the
     * other rows so they can observe wpp_err and abort */
    s->tab_slice_address[ctb_addr_rs] = -1;
    atomic_store(&s1->wpp_err, 1);
    ff_thread_report_progress2(s->avctx, ctb_row ,thread, SHIFT_CTB_WPP);
    return ret;
}
/* Decode the slice data of one NAL unit using wavefront parallel processing.
 *
 * Splits the slice at its entry-point offsets (one per CTB row), corrects
 * the offsets for emulation-prevention bytes that were stripped from
 * nal->data, then runs hls_decode_entry_wpp() for every row via execute2().
 *
 * Returns the accumulated per-row status (>= 0 on success) or a negative
 * AVERROR code.
 *
 * Fix: the per-thread context allocations (av_malloc/av_mallocz) were used
 * unchecked; on allocation failure the subsequent memcpy()/member access
 * dereferenced NULL (CWE-476).  Failures are now detected and rolled back
 * so a later call retries from a clean state. */
static int hls_slice_data_wpp(HEVCContext *s, const H2645NAL *nal)
{
    const uint8_t *data = nal->data;
    int length = nal->size;
    HEVCLocalContext *lc = s->HEVClc;
    int *ret = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
    int *arg = av_malloc_array(s->sh.num_entry_point_offsets + 1, sizeof(int));
    int64_t offset;
    int64_t startheader, cmpt = 0;
    int i, j, res = 0;

    if (!ret || !arg) {
        av_free(ret);
        av_free(arg);
        return AVERROR(ENOMEM);
    }

    /* reject entry points that would address CTBs beyond the picture */
    if (s->sh.slice_ctb_addr_rs + s->sh.num_entry_point_offsets * s->ps.sps->ctb_width >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
        av_log(s->avctx, AV_LOG_ERROR, "WPP ctb addresses are wrong (%d %d %d %d)\n",
               s->sh.slice_ctb_addr_rs, s->sh.num_entry_point_offsets,
               s->ps.sps->ctb_width, s->ps.sps->ctb_height
        );
        res = AVERROR_INVALIDDATA;
        goto error;
    }

    ff_alloc_entries(s->avctx, s->sh.num_entry_point_offsets + 1);

    /* lazily create one decoder context + local context per slice thread */
    if (!s->sList[1]) {
        for (i = 1; i < s->threads_number; i++) {
            s->sList[i]      = av_malloc(sizeof(HEVCContext));
            s->HEVClcList[i] = av_mallocz(sizeof(HEVCLocalContext));
            if (!s->sList[i] || !s->HEVClcList[i]) {
                /* roll everything back so sList[1] stays NULL and a later
                 * call re-attempts the allocation from scratch */
                for (; i > 0; i--) {
                    av_freep(&s->sList[i]);
                    av_freep(&s->HEVClcList[i]);
                }
                res = AVERROR(ENOMEM);
                goto error;
            }
            memcpy(s->sList[i], s, sizeof(HEVCContext));
            s->sList[i]->HEVClc = s->HEVClcList[i];
        }
    }

    offset = (lc->gb.index >> 3);

    /* count emulation-prevention bytes skipped before the first entry point;
     * the raw entry_point_offset values refer to the escaped bitstream */
    for (j = 0, cmpt = 0, startheader = offset + s->sh.entry_point_offset[0]; j < nal->skipped_bytes; j++) {
        if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
            startheader--;
            cmpt++;
        }
    }

    /* translate every entry point into (offset, size) within nal->data */
    for (i = 1; i < s->sh.num_entry_point_offsets; i++) {
        offset += (s->sh.entry_point_offset[i - 1] - cmpt);
        for (j = 0, cmpt = 0, startheader = offset
             + s->sh.entry_point_offset[i]; j < nal->skipped_bytes; j++) {
            if (nal->skipped_bytes_pos[j] >= offset && nal->skipped_bytes_pos[j] < startheader) {
                startheader--;
                cmpt++;
            }
        }
        s->sh.size[i - 1] = s->sh.entry_point_offset[i] - cmpt;
        s->sh.offset[i - 1] = offset;
    }
    if (s->sh.num_entry_point_offsets != 0) {
        offset += s->sh.entry_point_offset[s->sh.num_entry_point_offsets - 1] - cmpt;
        if (length < offset) {
            av_log(s->avctx, AV_LOG_ERROR, "entry_point_offset table is corrupted\n");
            res = AVERROR_INVALIDDATA;
            goto error;
        }
        s->sh.size[s->sh.num_entry_point_offsets - 1] = length - offset;
        s->sh.offset[s->sh.num_entry_point_offsets - 1] = offset;
    }
    s->data = data;

    /* refresh the per-thread context copies for this slice */
    for (i = 1; i < s->threads_number; i++) {
        s->sList[i]->HEVClc->first_qp_group = 1;
        s->sList[i]->HEVClc->qp_y = s->sList[0]->HEVClc->qp_y;
        memcpy(s->sList[i], s, sizeof(HEVCContext));
        s->sList[i]->HEVClc = s->HEVClcList[i];
    }

    atomic_store(&s->wpp_err, 0);
    ff_reset_entries(s->avctx);

    for (i = 0; i <= s->sh.num_entry_point_offsets; i++) {
        arg[i] = i;
        ret[i] = 0;
    }

    if (s->ps.pps->entropy_coding_sync_enabled_flag)
        s->avctx->execute2(s->avctx, hls_decode_entry_wpp, arg, ret, s->sh.num_entry_point_offsets + 1);

    for (i = 0; i <= s->sh.num_entry_point_offsets; i++)
        res += ret[i];
error:
    av_free(ret);
    av_free(arg);
    return res;
}
/* Export decoded SEI information as AVFrame side data on the current
 * reference frame: stereo 3D frame packing, display orientation, mastering
 * display metadata, content light level, A53 closed captions, and the
 * alternative transfer characteristic.
 *
 * Returns 0 on success or AVERROR(ENOMEM) if side-data allocation fails. */
static int set_side_data(HEVCContext *s)
{
    AVFrame *out = s->ref->frame;

    /* frame-packing SEI -> AVStereo3D (arrangement types 3..5 only) */
    if (s->sei.frame_packing.present &&
        s->sei.frame_packing.arrangement_type >= 3 &&
        s->sei.frame_packing.arrangement_type <= 5 &&
        s->sei.frame_packing.content_interpretation_type > 0 &&
        s->sei.frame_packing.content_interpretation_type < 3) {
        AVStereo3D *stereo = av_stereo3d_create_side_data(out);
        if (!stereo)
            return AVERROR(ENOMEM);

        switch (s->sei.frame_packing.arrangement_type) {
        case 3:
            if (s->sei.frame_packing.quincunx_subsampling)
                stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
            else
                stereo->type = AV_STEREO3D_SIDEBYSIDE;
            break;
        case 4:
            stereo->type = AV_STEREO3D_TOPBOTTOM;
            break;
        case 5:
            stereo->type = AV_STEREO3D_FRAMESEQUENCE;
            break;
        }

        /* interpretation type 2 means the right view comes first */
        if (s->sei.frame_packing.content_interpretation_type == 2)
            stereo->flags = AV_STEREO3D_FLAG_INVERT;

        if (s->sei.frame_packing.arrangement_type == 5) {
            if (s->sei.frame_packing.current_frame_is_frame0_flag)
                stereo->view = AV_STEREO3D_VIEW_LEFT;
            else
                stereo->view = AV_STEREO3D_VIEW_RIGHT;
        }
    }

    /* display-orientation SEI -> display matrix side data */
    if (s->sei.display_orientation.present &&
        (s->sei.display_orientation.anticlockwise_rotation ||
         s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
        /* the SEI stores rotation in 1/65536-of-a-turn units */
        double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
        AVFrameSideData *rotation = av_frame_new_side_data(out,
                                                           AV_FRAME_DATA_DISPLAYMATRIX,
                                                           sizeof(int32_t) * 9);
        if (!rotation)
            return AVERROR(ENOMEM);

        av_display_rotation_set((int32_t *)rotation->data, angle);
        av_display_matrix_flip((int32_t *)rotation->data,
                               s->sei.display_orientation.hflip,
                               s->sei.display_orientation.vflip);
    }

    // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
    // so the side data persists for the entire coded video sequence.
    if (s->sei.mastering_display.present > 0 &&
        IS_IRAP(s) && s->no_rasl_output_flag) {
        s->sei.mastering_display.present--;
    }
    if (s->sei.mastering_display.present) {
        // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
        const int mapping[3] = {2, 0, 1};
        const int chroma_den = 50000;
        const int luma_den = 10000;
        int i;
        AVMasteringDisplayMetadata *metadata =
            av_mastering_display_metadata_create_side_data(out);
        if (!metadata)
            return AVERROR(ENOMEM);

        for (i = 0; i < 3; i++) {
            const int j = mapping[i];
            metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
            metadata->display_primaries[i][0].den = chroma_den;
            metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
            metadata->display_primaries[i][1].den = chroma_den;
        }
        metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
        metadata->white_point[0].den = chroma_den;
        metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
        metadata->white_point[1].den = chroma_den;

        metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
        metadata->max_luminance.den = luma_den;
        metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
        metadata->min_luminance.den = luma_den;
        metadata->has_luminance = 1;
        metadata->has_primaries = 1;

        av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
        av_log(s->avctx, AV_LOG_DEBUG,
               "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
               av_q2d(metadata->display_primaries[0][0]),
               av_q2d(metadata->display_primaries[0][1]),
               av_q2d(metadata->display_primaries[1][0]),
               av_q2d(metadata->display_primaries[1][1]),
               av_q2d(metadata->display_primaries[2][0]),
               av_q2d(metadata->display_primaries[2][1]),
               av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
        av_log(s->avctx, AV_LOG_DEBUG,
               "min_luminance=%f, max_luminance=%f\n",
               av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
    }

    // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
    // so the side data persists for the entire coded video sequence.
    if (s->sei.content_light.present > 0 &&
        IS_IRAP(s) && s->no_rasl_output_flag) {
        s->sei.content_light.present--;
    }
    if (s->sei.content_light.present) {
        AVContentLightMetadata *metadata =
            av_content_light_metadata_create_side_data(out);
        if (!metadata)
            return AVERROR(ENOMEM);
        metadata->MaxCLL  = s->sei.content_light.max_content_light_level;
        metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;

        av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
        av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
               metadata->MaxCLL, metadata->MaxFALL);
    }

    /* A53 closed captions: ownership of the buffer passes to the frame copy,
     * then the SEI-held buffer is released */
    if (s->sei.a53_caption.a53_caption) {
        AVFrameSideData* sd = av_frame_new_side_data(out,
                                                     AV_FRAME_DATA_A53_CC,
                                                     s->sei.a53_caption.a53_caption_size);
        if (sd)
            memcpy(sd->data, s->sei.a53_caption.a53_caption, s->sei.a53_caption.a53_caption_size);
        av_freep(&s->sei.a53_caption.a53_caption);
        s->sei.a53_caption.a53_caption_size = 0;
        s->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
    }

    /* alternative transfer SEI overrides the signalled transfer function,
     * but only if the preferred value is known and specified */
    if (s->sei.alternative_transfer.present &&
        av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) &&
        s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
        s->avctx->color_trc = out->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
    }

    return 0;
}
/* Begin decoding a new picture: reset the per-picture scratch tables,
 * allocate a new reference frame, build the reference picture set, attach
 * side data and possibly output an already-finished frame.
 *
 * Returns 0 on success or a negative AVERROR code; on failure the freshly
 * acquired reference frame is released and s->ref reset to NULL. */
static int hevc_frame_start(HEVCContext *s)
{
    HEVCLocalContext *lc = s->HEVClc;
    /* +1 in each dimension: the tables carry one guard column/row */
    int pic_size_in_ctb = ((s->ps.sps->width >> s->ps.sps->log2_min_cb_size) + 1) *
                          ((s->ps.sps->height >> s->ps.sps->log2_min_cb_size) + 1);
    int ret;

    /* clear per-picture deblocking / PCM / slice-address bookkeeping */
    memset(s->horizontal_bs, 0, s->bs_width * s->bs_height);
    memset(s->vertical_bs, 0, s->bs_width * s->bs_height);
    memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
    memset(s->is_pcm, 0, (s->ps.sps->min_pu_width + 1) * (s->ps.sps->min_pu_height + 1));
    memset(s->tab_slice_address, -1, pic_size_in_ctb * sizeof(*s->tab_slice_address));

    s->is_decoded = 0;
    s->first_nal_type = s->nal_unit_type;

    /* RASL pictures that follow this IRAP must be dropped when the stream
     * (re)starts at it (IDR/BLA, or CRA right after an end-of-sequence) */
    s->no_rasl_output_flag = IS_IDR(s) || IS_BLA(s) || (s->nal_unit_type == HEVC_NAL_CRA_NUT && s->last_eos);

    if (s->ps.pps->tiles_enabled_flag)
        lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;

    ret = ff_hevc_set_new_ref(s, &s->frame, s->poc);
    if (ret < 0)
        goto fail;

    ret = ff_hevc_frame_rps(s);
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
        goto fail;
    }

    s->ref->frame->key_frame = IS_IRAP(s);

    ret = set_side_data(s);
    if (ret < 0)
        goto fail;

    /* slice_type 2/1/0 maps to AV_PICTURE_TYPE_I/P/B (= 3 - slice_type) */
    s->frame->pict_type = 3 - s->sh.slice_type;

    if (!IS_IRAP(s))
        ff_hevc_bump_frame(s);

    av_frame_unref(s->output_frame);
    ret = ff_hevc_output_frame(s, s->output_frame, 0);
    if (ret < 0)
        goto fail;

    if (!s->avctx->hwaccel)
        ff_thread_finish_setup(s->avctx);

    return 0;

fail:
    if (s->ref)
        ff_hevc_unref_frame(s, s->ref, ~0);
    s->ref = NULL;
    return ret;
}
/* Decode a single NAL unit: parameter sets and SEI update the decoder state,
 * VCL (slice) NALs start/continue a picture and decode their slice data.
 *
 * Returns 0 on success or a negative AVERROR code.  Most parse failures are
 * only fatal when AV_EF_EXPLODE is set; otherwise they are swallowed so the
 * decoder can resynchronize on later NAL units.
 *
 * Fix: the "two first slices" / "first slice missing" error paths jumped to
 * fail while ret still held the non-negative result of hls_slice_header(),
 * so with AV_EF_EXPLODE an error was reported as success.  ret is now set
 * to AVERROR_INVALIDDATA before those jumps. */
static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
{
    HEVCLocalContext *lc = s->HEVClc;
    GetBitContext *gb = &lc->gb;
    int ctb_addr_ts, ret;

    *gb = nal->gb;
    s->nal_unit_type = nal->type;
    s->temporal_id = nal->temporal_id;

    switch (s->nal_unit_type) {
    case HEVC_NAL_VPS:
        /* hand the raw parameter set to the hwaccel first, if it wants it */
        if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
            ret = s->avctx->hwaccel->decode_params(s->avctx,
                                                   nal->type,
                                                   nal->raw_data,
                                                   nal->raw_size);
            if (ret < 0)
                goto fail;
        }
        ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
        if (ret < 0)
            goto fail;
        break;
    case HEVC_NAL_SPS:
        if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
            ret = s->avctx->hwaccel->decode_params(s->avctx,
                                                   nal->type,
                                                   nal->raw_data,
                                                   nal->raw_size);
            if (ret < 0)
                goto fail;
        }
        ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
                                     s->apply_defdispwin);
        if (ret < 0)
            goto fail;
        break;
    case HEVC_NAL_PPS:
        if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
            ret = s->avctx->hwaccel->decode_params(s->avctx,
                                                   nal->type,
                                                   nal->raw_data,
                                                   nal->raw_size);
            if (ret < 0)
                goto fail;
        }
        ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
        if (ret < 0)
            goto fail;
        break;
    case HEVC_NAL_SEI_PREFIX:
    case HEVC_NAL_SEI_SUFFIX:
        if (s->avctx->hwaccel && s->avctx->hwaccel->decode_params) {
            ret = s->avctx->hwaccel->decode_params(s->avctx,
                                                   nal->type,
                                                   nal->raw_data,
                                                   nal->raw_size);
            if (ret < 0)
                goto fail;
        }
        ret = ff_hevc_decode_nal_sei(gb, s->avctx, &s->sei, &s->ps, s->nal_unit_type);
        if (ret < 0)
            goto fail;
        break;
    case HEVC_NAL_TRAIL_R:
    case HEVC_NAL_TRAIL_N:
    case HEVC_NAL_TSA_N:
    case HEVC_NAL_TSA_R:
    case HEVC_NAL_STSA_N:
    case HEVC_NAL_STSA_R:
    case HEVC_NAL_BLA_W_LP:
    case HEVC_NAL_BLA_W_RADL:
    case HEVC_NAL_BLA_N_LP:
    case HEVC_NAL_IDR_W_RADL:
    case HEVC_NAL_IDR_N_LP:
    case HEVC_NAL_CRA_NUT:
    case HEVC_NAL_RADL_N:
    case HEVC_NAL_RADL_R:
    case HEVC_NAL_RASL_N:
    case HEVC_NAL_RASL_R:
        ret = hls_slice_header(s);
        if (ret < 0)
            return ret;

        /* honour the user's skip_frame policy before any frame work */
        if (
            (s->avctx->skip_frame >= AVDISCARD_BIDIR && s->sh.slice_type == HEVC_SLICE_B) ||
            (s->avctx->skip_frame >= AVDISCARD_NONINTRA && s->sh.slice_type != HEVC_SLICE_I) ||
            (s->avctx->skip_frame >= AVDISCARD_NONKEY && !IS_IRAP(s))) {
            break;
        }

        if (s->sh.first_slice_in_pic_flag) {
            if (s->ref) {
                av_log(s->avctx, AV_LOG_ERROR, "Two slices reporting being the first in the same frame.\n");
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            /* establish the random-access point POC on the first RAP seen */
            if (s->max_ra == INT_MAX) {
                if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
                    s->max_ra = s->poc;
                } else {
                    if (IS_IDR(s))
                        s->max_ra = INT_MIN;
                }
            }

            /* RASL pictures preceding the recovery point are undecodable */
            if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
                s->poc <= s->max_ra) {
                s->is_decoded = 0;
                break;
            } else {
                if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
                    s->max_ra = INT_MIN;
            }

            s->overlap ++;
            ret = hevc_frame_start(s);
            if (ret < 0)
                return ret;
        } else if (!s->ref) {
            av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        if (s->nal_unit_type != s->first_nal_type) {
            av_log(s->avctx, AV_LOG_ERROR,
                   "Non-matching NAL types of the VCL NALUs: %d %d\n",
                   s->first_nal_type, s->nal_unit_type);
            return AVERROR_INVALIDDATA;
        }

        if (!s->sh.dependent_slice_segment_flag &&
            s->sh.slice_type != HEVC_SLICE_I) {
            ret = ff_hevc_slice_rpl(s);
            if (ret < 0) {
                av_log(s->avctx, AV_LOG_WARNING,
                       "Error constructing the reference lists for the current slice.\n");
                goto fail;
            }
        }

        if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
            ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
            if (ret < 0)
                goto fail;
        }

        if (s->avctx->hwaccel) {
            ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
            if (ret < 0)
                goto fail;
        } else {
            if (s->threads_number > 1 && s->sh.num_entry_point_offsets > 0)
                ctb_addr_ts = hls_slice_data_wpp(s, nal);
            else
                ctb_addr_ts = hls_slice_data(s);
            /* the whole picture has been covered once the returned CTB
             * address reaches the picture's CTB count */
            if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
                s->is_decoded = 1;
            }

            if (ctb_addr_ts < 0) {
                ret = ctb_addr_ts;
                goto fail;
            }
        }
        break;
    case HEVC_NAL_EOS_NUT:
    case HEVC_NAL_EOB_NUT:
        s->seq_decode = (s->seq_decode + 1) & 0xff;
        s->max_ra = INT_MAX;
        break;
    case HEVC_NAL_AUD:
    case HEVC_NAL_FD_NUT:
        break;
    default:
        av_log(s->avctx, AV_LOG_INFO,
               "Skipping NAL unit %d\n", s->nal_unit_type);
    }

    return 0;
fail:
    if (s->avctx->err_recognition & AV_EF_EXPLODE)
        return ret;
    return 0;
}
/* Decode one access unit: split the packet into NAL units, record EOS/EOB
 * markers, then decode every NAL in order.
 *
 * Note the fail: label is reached on both the success and error paths; it
 * only makes sure frame-threaded consumers waiting on s->ref are released.
 *
 * Returns 0/positive on success or a negative AVERROR code. */
static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
{
    int i, ret = 0;
    int eos_at_start = 1;

    s->ref = NULL;
    s->last_eos = s->eos;
    s->eos = 0;
    s->overlap = 0;

    /* split the input packet into NAL units, so we know the upper bound on the
     * number of slices in the frame */
    ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
                                s->nal_length_size, s->avctx->codec_id, 1, 0);
    if (ret < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "Error splitting the input into NAL units.\n");
        return ret;
    }

    /* an EOS/EOB before any other NAL affects THIS packet (last_eos);
     * one appearing later affects the NEXT packet (eos) */
    for (i = 0; i < s->pkt.nb_nals; i++) {
        if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
            s->pkt.nals[i].type == HEVC_NAL_EOS_NUT) {
            if (eos_at_start) {
                s->last_eos = 1;
            } else {
                s->eos = 1;
            }
        } else {
            eos_at_start = 0;
        }
    }

    /* decode the NAL units */
    for (i = 0; i < s->pkt.nb_nals; i++) {
        H2645NAL *nal = &s->pkt.nals[i];

        if (s->avctx->skip_frame >= AVDISCARD_ALL ||
            (s->avctx->skip_frame >= AVDISCARD_NONREF
            && ff_hevc_nal_is_nonref(nal->type)))
            continue;

        ret = decode_nal_unit(s, nal);
        /* more than two "first slice" markers in one packet: corrupt input */
        if (ret >= 0 && s->overlap > 2)
            ret = AVERROR_INVALIDDATA;
        if (ret < 0) {
            av_log(s->avctx, AV_LOG_WARNING,
                   "Error parsing NAL unit #%d.\n", i);
            goto fail;
        }
    }

fail:
    /* unblock any frame thread waiting on this picture's progress */
    if (s->ref && s->threads_type == FF_THREAD_FRAME)
        ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);

    return ret;
}
/* Log a 16-byte MD5 digest as 32 lowercase hex characters (no newline). */
static void print_md5(void *log_ctx, int level, uint8_t md5[16])
{
    int idx = 0;

    while (idx < 16) {
        av_log(log_ctx, level, "%02"PRIx8, md5[idx]);
        idx++;
    }
}
/* Verify the decoded picture against the MD5 hashes carried in the
 * picture-hash SEI message, one hash per plane.
 *
 * Returns 0 when every plane matches, AVERROR_INVALIDDATA on a mismatch,
 * AVERROR(EINVAL) for an unknown pixel format, AVERROR(ENOMEM) if the
 * byte-swap scratch buffer cannot be allocated (big-endian hosts only). */
static int verify_md5(HEVCContext *s, AVFrame *frame)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int pixel_shift;
    int i, j;

    if (!desc)
        return AVERROR(EINVAL);

    /* 1 when samples are stored as 16-bit words, 0 for 8-bit */
    pixel_shift = desc->comp[0].depth > 8;

    av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ",
           s->poc);

    /* the checksums are LE, so we have to byteswap for >8bpp formats
     * on BE arches */
#if HAVE_BIGENDIAN
    if (pixel_shift && !s->checksum_buf) {
        av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
                       FFMAX3(frame->linesize[0], frame->linesize[1],
                              frame->linesize[2]));
        if (!s->checksum_buf)
            return AVERROR(ENOMEM);
    }
#endif

    for (i = 0; frame->data[i]; i++) {
        int width = s->avctx->coded_width;
        int height = s->avctx->coded_height;
        /* chroma planes (1 and 2) are subsampled per the pixel format */
        int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
        int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
        uint8_t md5[16];

        av_md5_init(s->md5_ctx);
        for (j = 0; j < h; j++) {
            const uint8_t *src = frame->data[i] + j * frame->linesize[i];
#if HAVE_BIGENDIAN
            if (pixel_shift) {
                s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
                                    (const uint16_t *) src, w);
                src = s->checksum_buf;
            }
#endif
            av_md5_update(s->md5_ctx, src, w << pixel_shift);
        }
        av_md5_final(s->md5_ctx, md5);

        if (!memcmp(md5, s->sei.picture_hash.md5[i], 16)) {
            av_log   (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i);
            print_md5(s->avctx, AV_LOG_DEBUG, md5);
            av_log   (s->avctx, AV_LOG_DEBUG, "; ");
        } else {
            av_log   (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i);
            print_md5(s->avctx, AV_LOG_ERROR, md5);
            av_log   (s->avctx, AV_LOG_ERROR, " != ");
            print_md5(s->avctx, AV_LOG_ERROR, s->sei.picture_hash.md5[i]);
            av_log   (s->avctx, AV_LOG_ERROR, "\n");
            return AVERROR_INVALIDDATA;
        }
    }

    av_log(s->avctx, AV_LOG_DEBUG, "\n");

    return 0;
}
/* Parse out-of-band extradata (hvcC or raw NALs).  When `first` is set,
 * stream-level parameters from the first available SPS are exported to the
 * AVCodecContext.  Returns 0 on success or a negative AVERROR code. */
static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length, int first)
{
    int i;
    int ret = ff_hevc_decode_extradata(buf, length, &s->ps, &s->sei, &s->is_nalff,
                                       &s->nal_length_size, s->avctx->err_recognition,
                                       s->apply_defdispwin, s->avctx);
    if (ret < 0)
        return ret;

    /* export stream parameters from the first SPS */
    if (first) {
        for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
            if (s->ps.sps_list[i]) {
                const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
                export_stream_params(s->avctx, &s->ps, sps);
                break;
            }
        }
    }

    return 0;
}
/* Top-level decode callback: decode one packet (or drain on empty packet),
 * verify the SEI MD5 if requested, and hand out a finished frame when one
 * is ready.  Returns the number of bytes consumed or a negative AVERROR. */
static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
                             AVPacket *avpkt)
{
    int ret;
    int new_extradata_size;
    uint8_t *new_extradata;
    HEVCContext *s = avctx->priv_data;

    /* empty packet: flush a delayed frame out of the DPB, if any */
    if (!avpkt->size) {
        ret = ff_hevc_output_frame(s, data, 1);
        if (ret < 0)
            return ret;

        *got_output = ret;
        return 0;
    }

    /* mid-stream extradata update (e.g. codec reconfiguration) */
    new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
                                            &new_extradata_size);
    if (new_extradata && new_extradata_size > 0) {
        ret = hevc_decode_extradata(s, new_extradata, new_extradata_size, 0);
        if (ret < 0)
            return ret;
    }

    s->ref = NULL;
    ret    = decode_nal_units(s, avpkt->data, avpkt->size);
    if (ret < 0)
        return ret;

    if (avctx->hwaccel) {
        if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "hardware accelerator failed to decode picture\n");
            ff_hevc_unref_frame(s, s->ref, ~0);
            return ret;
        }
    } else {
        /* verify the SEI checksum */
        if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded &&
            s->sei.picture_hash.is_md5) {
            ret = verify_md5(s, s->ref->frame);
            if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
                ff_hevc_unref_frame(s, s->ref, ~0);
                return ret;
            }
        }
    }
    /* the hash applies only to this picture; clear it for the next one */
    s->sei.picture_hash.is_md5 = 0;

    if (s->is_decoded) {
        av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
        s->is_decoded = 0;
    }

    if (s->output_frame->buf[0]) {
        av_frame_move_ref(data, s->output_frame);
        *got_output = 1;
    }

    return avpkt->size;
}
/* Make dst an independent reference to src: new references are taken on
 * every buffer src owns and the plain fields are copied over.  On any
 * allocation failure dst is fully unreferenced again.
 * Returns 0 on success, a negative AVERROR code otherwise. */
static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
{
    int ret = ff_thread_ref_frame(&dst->tf, &src->tf);
    if (ret < 0)
        return ret;

    if (!(dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf)))
        goto fail;
    dst->tab_mvf = src->tab_mvf;

    if (!(dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf)))
        goto fail;
    dst->rpl_tab = src->rpl_tab;

    if (!(dst->rpl_buf = av_buffer_ref(src->rpl_buf)))
        goto fail;

    /* non-refcounted state travels by plain copy */
    dst->poc       = src->poc;
    dst->ctb_count = src->ctb_count;
    dst->flags     = src->flags;
    dst->sequence  = src->sequence;

    if (src->hwaccel_picture_private) {
        if (!(dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf)))
            goto fail;
        dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
    }

    return 0;
fail:
    ff_hevc_unref_frame(s, dst, ~0);
    return AVERROR(ENOMEM);
}
/* Free everything owned by the decoder context.  Also used as the cleanup
 * path of hevc_init_context(), so every member must tolerate being freed
 * while NULL / partially initialized. */
static av_cold int hevc_decode_free(AVCodecContext *avctx)
{
    HEVCContext *s = avctx->priv_data;
    int i;

    pic_arrays_free(s);

    av_freep(&s->md5_ctx);

    av_freep(&s->cabac_state);

    for (i = 0; i < 3; i++) {
        av_freep(&s->sao_pixel_buffer_h[i]);
        av_freep(&s->sao_pixel_buffer_v[i]);
    }
    av_frame_free(&s->output_frame);

    /* release all DPB entries before destroying their frames */
    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
        ff_hevc_unref_frame(s, &s->DPB[i], ~0);
        av_frame_free(&s->DPB[i].frame);
    }

    ff_hevc_ps_uninit(&s->ps);

    av_freep(&s->sh.entry_point_offset);
    av_freep(&s->sh.offset);
    av_freep(&s->sh.size);

    /* per-thread WPP contexts (index 0 is the main context, handled below) */
    for (i = 1; i < s->threads_number; i++) {
        HEVCLocalContext *lc = s->HEVClcList[i];
        if (lc) {
            av_freep(&s->HEVClcList[i]);
            av_freep(&s->sList[i]);
        }
    }

    /* HEVClc aliases HEVClcList[0]; NULL it first to avoid a dangling ptr */
    if (s->HEVClc == s->HEVClcList[0])
        s->HEVClc = NULL;
    av_freep(&s->HEVClcList[0]);

    ff_h2645_packet_uninit(&s->pkt);

    return 0;
}
/* Allocate the long-lived pieces of the decoder context: the main local
 * context, CABAC state, output frame, DPB frames and MD5 context.
 * On any failure everything allocated so far is torn down via
 * hevc_decode_free() and AVERROR(ENOMEM) is returned. */
static av_cold int hevc_init_context(AVCodecContext *avctx)
{
    HEVCContext *s = avctx->priv_data;
    int i;

    s->avctx = avctx;

    s->HEVClc = av_mallocz(sizeof(HEVCLocalContext));
    if (!s->HEVClc)
        goto fail;
    /* slot 0 of the per-thread lists is the main (serial) context */
    s->HEVClcList[0] = s->HEVClc;
    s->sList[0] = s;

    s->cabac_state = av_malloc(HEVC_CONTEXTS);
    if (!s->cabac_state)
        goto fail;

    s->output_frame = av_frame_alloc();
    if (!s->output_frame)
        goto fail;

    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
        s->DPB[i].frame = av_frame_alloc();
        if (!s->DPB[i].frame)
            goto fail;
        s->DPB[i].tf.f = s->DPB[i].frame;
    }

    s->max_ra = INT_MAX;

    s->md5_ctx = av_md5_alloc();
    if (!s->md5_ctx)
        goto fail;

    ff_bswapdsp_init(&s->bdsp);

    s->context_initialized = 1;
    s->eos = 0;

    ff_hevc_reset_sei(&s->sei);

    return 0;

fail:
    hevc_decode_free(avctx);
    return AVERROR(ENOMEM);
}
#if HAVE_THREADS
/* Frame-threading callback: synchronize the worker context `dst` with the
 * source context `src` — re-reference the DPB and parameter-set buffers,
 * and copy over the scalar sequence state.
 * Returns 0 on success or a negative AVERROR code. */
static int hevc_update_thread_context(AVCodecContext *dst,
                                      const AVCodecContext *src)
{
    HEVCContext *s = dst->priv_data;
    HEVCContext *s0 = src->priv_data;
    int i, ret;

    if (!s->context_initialized) {
        ret = hevc_init_context(dst);
        if (ret < 0)
            return ret;
    }

    /* replace our DPB with references to the source's frames */
    for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
        ff_hevc_unref_frame(s, &s->DPB[i], ~0);
        if (s0->DPB[i].frame->buf[0]) {
            ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
            if (ret < 0)
                return ret;
        }
    }

    /* invalidate the active SPS if the source has switched to another one */
    if (s->ps.sps != s0->ps.sps)
        s->ps.sps = NULL;
    for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
        av_buffer_unref(&s->ps.vps_list[i]);
        if (s0->ps.vps_list[i]) {
            s->ps.vps_list[i] = av_buffer_ref(s0->ps.vps_list[i]);
            if (!s->ps.vps_list[i])
                return AVERROR(ENOMEM);
        }
    }

    for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
        av_buffer_unref(&s->ps.sps_list[i]);
        if (s0->ps.sps_list[i]) {
            s->ps.sps_list[i] = av_buffer_ref(s0->ps.sps_list[i]);
            if (!s->ps.sps_list[i])
                return AVERROR(ENOMEM);
        }
    }

    for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
        av_buffer_unref(&s->ps.pps_list[i]);
        if (s0->ps.pps_list[i]) {
            s->ps.pps_list[i] = av_buffer_ref(s0->ps.pps_list[i]);
            if (!s->ps.pps_list[i])
                return AVERROR(ENOMEM);
        }
    }

    /* re-activate the source's SPS if it changed (reallocates pic arrays) */
    if (s->ps.sps != s0->ps.sps)
        if ((ret = set_sps(s, s0->ps.sps, src->pix_fmt)) < 0)
            return ret;

    s->seq_decode = s0->seq_decode;
    s->seq_output = s0->seq_output;
    s->pocTid0 = s0->pocTid0;
    s->max_ra = s0->max_ra;
    s->eos = s0->eos;
    s->no_rasl_output_flag = s0->no_rasl_output_flag;

    s->is_nalff = s0->is_nalff;
    s->nal_length_size = s0->nal_length_size;

    s->threads_number = s0->threads_number;
    s->threads_type = s0->threads_type;

    /* the source saw an end-of-sequence: bump to the next sequence */
    if (s0->eos) {
        s->seq_decode = (s->seq_decode + 1) & 0xff;
        s->max_ra = INT_MAX;
    }

    s->sei.frame_packing = s0->sei.frame_packing;
    s->sei.display_orientation = s0->sei.display_orientation;
    s->sei.mastering_display = s0->sei.mastering_display;
    s->sei.content_light = s0->sei.content_light;
    s->sei.alternative_transfer = s0->sei.alternative_transfer;

    return 0;
}
#endif
/* Decoder init callback: allocate the context, configure threading and
 * parse any extradata supplied up front.
 * Returns 0 on success or a negative AVERROR code. */
static av_cold int hevc_decode_init(AVCodecContext *avctx)
{
    HEVCContext *s = avctx->priv_data;
    int ret;

    avctx->internal->allocate_progress = 1;

    ret = hevc_init_context(avctx);
    if (ret < 0)
        return ret;

    s->enable_parallel_tiles = 0;
    s->sei.picture_timing.picture_struct = 0;
    s->eos = 1;

    atomic_init(&s->wpp_err, 0);

    if(avctx->active_thread_type & FF_THREAD_SLICE)
        s->threads_number = avctx->thread_count;
    else
        s->threads_number = 1;

    if (avctx->extradata_size > 0 && avctx->extradata) {
        ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size, 1);
        if (ret < 0) {
            hevc_decode_free(avctx);
            return ret;
        }
    }

    /* frame threading takes precedence over slice threading when allowed */
    if((avctx->active_thread_type & FF_THREAD_FRAME) && avctx->thread_count > 1)
            s->threads_type = FF_THREAD_FRAME;
        else
            s->threads_type = FF_THREAD_SLICE;

    return 0;
}
#if HAVE_THREADS
/* Frame-threading worker init: start from a zeroed context, then run the
 * common context allocation. */
static av_cold int hevc_init_thread_copy(AVCodecContext *avctx)
{
    HEVCContext *s = avctx->priv_data;

    memset(s, 0, sizeof(*s));

    return hevc_init_context(avctx);
}
#endif
/* Flush callback: drop every picture from the DPB and reset the
 * random-access / end-of-sequence state for the next stream position. */
static void hevc_decode_flush(AVCodecContext *avctx)
{
    HEVCContext *ctx = avctx->priv_data;

    ff_hevc_flush_dpb(ctx);
    ctx->eos    = 1;
    ctx->max_ra = INT_MAX;
}
#define OFFSET(x) offsetof(HEVCContext, x)
#define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)

/* Decoder private options; both entries map onto the same field, the second
 * being a legacy alias (its description carries a historical typo that is
 * kept as-is since it is user-visible help text). */
static const AVOption options[] = {
    { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
        AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
    { "strict-displaywin", "stricly apply default display window size", OFFSET(apply_defdispwin),
        AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, PAR },
    { NULL },
};

static const AVClass hevc_decoder_class = {
    .class_name = "HEVC decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Public registration of the native HEVC decoder, including the list of
 * hardware acceleration back-ends compiled into this build. */
AVCodec ff_hevc_decoder = {
    .name                  = "hevc",
    .long_name             = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_HEVC,
    .priv_data_size        = sizeof(HEVCContext),
    .priv_class            = &hevc_decoder_class,
    .init                  = hevc_decode_init,
    .close                 = hevc_decode_free,
    .decode                = hevc_decode_frame,
    .flush                 = hevc_decode_flush,
    .update_thread_context = ONLY_IF_THREADS_ENABLED(hevc_update_thread_context),
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(hevc_init_thread_copy),
    .capabilities          = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
                             AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING,
    .profiles              = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
    .hw_configs            = (const AVCodecHWConfigInternal*[]) {
#if CONFIG_HEVC_DXVA2_HWACCEL
                               HWACCEL_DXVA2(hevc),
#endif
#if CONFIG_HEVC_D3D11VA_HWACCEL
                               HWACCEL_D3D11VA(hevc),
#endif
#if CONFIG_HEVC_D3D11VA2_HWACCEL
                               HWACCEL_D3D11VA2(hevc),
#endif
#if CONFIG_HEVC_NVDEC_HWACCEL
                               HWACCEL_NVDEC(hevc),
#endif
#if CONFIG_HEVC_VAAPI_HWACCEL
                               HWACCEL_VAAPI(hevc),
#endif
#if CONFIG_HEVC_VDPAU_HWACCEL
                               HWACCEL_VDPAU(hevc),
#endif
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
                               HWACCEL_VIDEOTOOLBOX(hevc),
#endif
                               NULL
                           },
};
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_799_0 |
crossvul-cpp_data_good_887_0 | /*
* Copyright (C) 2011 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#define pr_fmt(fmt) "llcp: %s: " fmt, __func__
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/nfc.h>
#include <net/nfc/nfc.h>
#include "nfc.h"
#include "llcp.h"
static u8 llcp_tlv_length[LLCP_TLV_MAX] = {
0,
1, /* VERSION */
2, /* MIUX */
2, /* WKS */
1, /* LTO */
1, /* RW */
0, /* SN */
1, /* OPT */
0, /* SDREQ */
2, /* SDRES */
};
static u8 llcp_tlv8(u8 *tlv, u8 type)
{
if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
return 0;
return tlv[2];
}
static u16 llcp_tlv16(u8 *tlv, u8 type)
{
if (tlv[0] != type || tlv[1] != llcp_tlv_length[tlv[0]])
return 0;
return be16_to_cpu(*((__be16 *)(tlv + 2)));
}
static u8 llcp_tlv_version(u8 *tlv)
{
return llcp_tlv8(tlv, LLCP_TLV_VERSION);
}
static u16 llcp_tlv_miux(u8 *tlv)
{
return llcp_tlv16(tlv, LLCP_TLV_MIUX) & 0x7ff;
}
static u16 llcp_tlv_wks(u8 *tlv)
{
return llcp_tlv16(tlv, LLCP_TLV_WKS);
}
static u16 llcp_tlv_lto(u8 *tlv)
{
return llcp_tlv8(tlv, LLCP_TLV_LTO);
}
static u8 llcp_tlv_opt(u8 *tlv)
{
return llcp_tlv8(tlv, LLCP_TLV_OPT);
}
static u8 llcp_tlv_rw(u8 *tlv)
{
return llcp_tlv8(tlv, LLCP_TLV_RW) & 0xf;
}
u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
{
u8 *tlv, length;
pr_debug("type %d\n", type);
if (type >= LLCP_TLV_MAX)
return NULL;
length = llcp_tlv_length[type];
if (length == 0 && value_length == 0)
return NULL;
else if (length == 0)
length = value_length;
*tlv_length = 2 + length;
tlv = kzalloc(2 + length, GFP_KERNEL);
if (tlv == NULL)
return tlv;
tlv[0] = type;
tlv[1] = length;
memcpy(tlv + 2, value, length);
return tlv;
}
struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdres_tlv(u8 tid, u8 sap)
{
struct nfc_llcp_sdp_tlv *sdres;
u8 value[2];
sdres = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
if (sdres == NULL)
return NULL;
value[0] = tid;
value[1] = sap;
sdres->tlv = nfc_llcp_build_tlv(LLCP_TLV_SDRES, value, 2,
&sdres->tlv_len);
if (sdres->tlv == NULL) {
kfree(sdres);
return NULL;
}
sdres->tid = tid;
sdres->sap = sap;
INIT_HLIST_NODE(&sdres->node);
return sdres;
}
/* Build a Service Discovery request TLV for @uri. Returns a kzalloc'ed
 * descriptor (freed with nfc_llcp_free_sdp_tlv()) or NULL on invalid
 * length or allocation failure.
 */
struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
						  size_t uri_len)
{
	struct nfc_llcp_sdp_tlv *sdreq;

	pr_debug("uri: %s, len: %zu\n", uri, uri_len);

	/* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL.
	 * A zero-length URI would also make the uri[uri_len - 1] check below
	 * read one byte before the buffer, so reject it up front.
	 */
	if (WARN_ON_ONCE(uri_len == 0 || uri_len > U8_MAX - 4))
		return NULL;

	sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
	if (sdreq == NULL)
		return NULL;

	/* TLV header (type, length, tid) occupies 3 bytes before the URI. */
	sdreq->tlv_len = uri_len + 3;

	/* Do not transmit a trailing NUL already embedded in @uri. */
	if (uri[uri_len - 1] == 0)
		sdreq->tlv_len--;

	/* +1 keeps the stored copy NUL-terminated for local printing. */
	sdreq->tlv = kzalloc(sdreq->tlv_len + 1, GFP_KERNEL);
	if (sdreq->tlv == NULL) {
		kfree(sdreq);
		return NULL;
	}

	sdreq->tlv[0] = LLCP_TLV_SDREQ;
	sdreq->tlv[1] = sdreq->tlv_len - 2;
	sdreq->tlv[2] = tid;

	sdreq->tid = tid;
	sdreq->uri = sdreq->tlv + 3;
	memcpy(sdreq->uri, uri, uri_len);

	sdreq->time = jiffies;

	INIT_HLIST_NODE(&sdreq->node);

	return sdreq;
}
/* Free one SDP TLV descriptor and its embedded TLV buffer. */
void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp)
{
	kfree(sdp->tlv);
	kfree(sdp);
}

/* Unlink and free every SDP TLV descriptor on @head. */
void nfc_llcp_free_sdp_tlv_list(struct hlist_head *head)
{
	struct nfc_llcp_sdp_tlv *sdp;
	struct hlist_node *n;

	/* _safe variant: each node is deleted while iterating. */
	hlist_for_each_entry_safe(sdp, n, head, node) {
		hlist_del(&sdp->node);

		nfc_llcp_free_sdp_tlv(sdp);
	}
}
/* Parse the general-bytes TLV array received from the peer and record the
 * remote link parameters in @local. Unknown TLV types are logged and
 * skipped. Returns 0, or -ENODEV when @local is NULL.
 */
int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
			  u8 *tlv_array, u16 tlv_array_len)
{
	u8 *tlv = tlv_array, type, length;
	u16 offset = 0;

	pr_debug("TLV array length %d\n", tlv_array_len);

	if (local == NULL)
		return -ENODEV;

	/* Each TLV needs at least its 2-byte type/length header. The offset
	 * is u16 (matching tlv_array_len) so it cannot wrap on arrays longer
	 * than 255 bytes, which the previous u8 counter allowed.
	 */
	while (offset + 1 < tlv_array_len) {
		type = tlv[0];
		length = tlv[1];

		pr_debug("type 0x%x length %d\n", type, length);

		/* Stop if the declared value would run past the array. */
		if (length > tlv_array_len - offset - 2)
			break;

		switch (type) {
		case LLCP_TLV_VERSION:
			local->remote_version = llcp_tlv_version(tlv);
			break;
		case LLCP_TLV_MIUX:
			local->remote_miu = llcp_tlv_miux(tlv) + 128;
			break;
		case LLCP_TLV_WKS:
			local->remote_wks = llcp_tlv_wks(tlv);
			break;
		case LLCP_TLV_LTO:
			local->remote_lto = llcp_tlv_lto(tlv) * 10;
			break;
		case LLCP_TLV_OPT:
			local->remote_opt = llcp_tlv_opt(tlv);
			break;
		default:
			pr_err("Invalid gt tlv value 0x%x\n", type);
			break;
		}

		offset += length + 2;
		tlv += length + 2;
	}

	pr_debug("version 0x%x miu %d lto %d opt 0x%x wks 0x%x\n",
		 local->remote_version, local->remote_miu,
		 local->remote_lto, local->remote_opt,
		 local->remote_wks);

	return 0;
}
/* Parse the parameter TLVs of a CONNECT/CC PDU and record the remote MIU
 * and receive window on @sock. Returns 0, or -ENOTCONN when @sock is NULL.
 */
int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
				  u8 *tlv_array, u16 tlv_array_len)
{
	u8 *tlv = tlv_array, type, length;
	u16 offset = 0;

	pr_debug("TLV array length %d\n", tlv_array_len);

	if (sock == NULL)
		return -ENOTCONN;

	/* Each TLV needs at least its 2-byte type/length header. The offset
	 * is u16 (matching tlv_array_len) so it cannot wrap on arrays longer
	 * than 255 bytes, which the previous u8 counter allowed.
	 */
	while (offset + 1 < tlv_array_len) {
		type = tlv[0];
		length = tlv[1];

		pr_debug("type 0x%x length %d\n", type, length);

		/* Stop if the declared value would run past the array. */
		if (length > tlv_array_len - offset - 2)
			break;

		switch (type) {
		case LLCP_TLV_MIUX:
			sock->remote_miu = llcp_tlv_miux(tlv) + 128;
			break;
		case LLCP_TLV_RW:
			sock->remote_rw = llcp_tlv_rw(tlv);
			break;
		case LLCP_TLV_SN:
			break;
		default:
			pr_err("Invalid gt tlv value 0x%x\n", type);
			break;
		}

		offset += length + 2;
		tlv += length + 2;
	}

	pr_debug("sock %p rw %d miu %d\n", sock,
		 sock->remote_rw, sock->remote_miu);

	return 0;
}
/* Append the 2-byte LLCP PDU header (DSAP:6 | PTYPE:4 | SSAP:6) to @pdu.
 * The skb must have been allocated with room for LLCP_HEADER_SIZE bytes.
 */
static struct sk_buff *llcp_add_header(struct sk_buff *pdu,
				       u8 dsap, u8 ssap, u8 ptype)
{
	u8 header[2];

	pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap);

	/* Pack: top 6 bits DSAP, middle 4 bits PTYPE, low 6 bits SSAP. */
	header[0] = (u8)((dsap << 2) | (ptype >> 2));
	header[1] = (u8)((ptype << 6) | ssap);

	pr_debug("header 0x%x 0x%x\n", header[0], header[1]);

	skb_put_data(pdu, header, LLCP_HEADER_SIZE);

	return pdu;
}

/* Append a pre-built TLV to @pdu; a NULL @tlv is silently ignored
 * (returns NULL in that case, which callers discard).
 */
static struct sk_buff *llcp_add_tlv(struct sk_buff *pdu, u8 *tlv,
				    u8 tlv_length)
{
	/* XXX Add an skb length check */
	if (tlv == NULL)
		return NULL;

	skb_put_data(pdu, tlv, tlv_length);

	return pdu;
}
/* Allocate an skb for a @cmd PDU with room for @size payload bytes and
 * write the LLCP header using the socket's dsap/ssap. Returns NULL when
 * the socket has no source SAP bound (ssap == 0) or allocation fails.
 */
static struct sk_buff *llcp_allocate_pdu(struct nfc_llcp_sock *sock,
					 u8 cmd, u16 size)
{
	struct sk_buff *skb;
	int err;

	if (sock->ssap == 0)
		return NULL;

	skb = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
				 size + LLCP_HEADER_SIZE, &err);
	if (skb == NULL) {
		pr_err("Could not allocate PDU\n");
		return NULL;
	}

	skb = llcp_add_header(skb, sock->dsap, sock->ssap, cmd);

	return skb;
}
/* Queue a DISC (disconnect) PDU for @sock on the local tx queue.
 * Returns 0, -ENODEV when the local/device is gone, or -ENOMEM.
 */
int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock)
{
	struct sk_buff *skb;
	struct nfc_dev *dev;
	struct nfc_llcp_local *local;

	pr_debug("Sending DISC\n");

	local = sock->local;
	if (local == NULL)
		return -ENODEV;

	dev = sock->dev;
	if (dev == NULL)
		return -ENODEV;

	skb = llcp_allocate_pdu(sock, LLCP_PDU_DISC, 0);
	if (skb == NULL)
		return -ENOMEM;

	skb_queue_tail(&local->tx_queue, skb);

	return 0;
}
/* Build a SYMM (keep-alive) PDU and hand it directly to the data exchange
 * path (it bypasses the tx queue). Also mirrors the frame to raw sockets.
 * Returns 0 or a negative errno.
 */
int nfc_llcp_send_symm(struct nfc_dev *dev)
{
	struct sk_buff *skb;
	struct nfc_llcp_local *local;
	u16 size = 0;

	pr_debug("Sending SYMM\n");

	local = nfc_llcp_find_local(dev);
	if (local == NULL)
		return -ENODEV;

	/* Reserve room for the LLCP header plus the device headroom. */
	size += LLCP_HEADER_SIZE;
	size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;

	skb = alloc_skb(size, GFP_KERNEL);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);

	/* SYMM uses the reserved SAP 0 for both source and destination. */
	skb = llcp_add_header(skb, 0, 0, LLCP_PDU_SYMM);

	__net_timestamp(skb);

	nfc_llcp_send_to_raw_sock(local, skb, NFC_DIRECTION_TX);

	return nfc_data_exchange(dev, local->target_idx, skb,
				 nfc_llcp_recv, local);
}
/* Build and queue a CONNECT PDU carrying the service name TLV (when
 * connecting by name) plus MIUX and RW parameter TLVs. All temporary TLV
 * buffers are freed on every path (kfree(NULL) is a no-op for the ones
 * never built). Returns 0, -ENODEV or -ENOMEM.
 */
int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
{
	struct nfc_llcp_local *local;
	struct sk_buff *skb;
	u8 *service_name_tlv = NULL, service_name_tlv_length;
	u8 *miux_tlv = NULL, miux_tlv_length;
	u8 *rw_tlv = NULL, rw_tlv_length, rw;
	int err;
	u16 size = 0;
	__be16 miux;

	pr_debug("Sending CONNECT\n");

	local = sock->local;
	if (local == NULL)
		return -ENODEV;

	if (sock->service_name != NULL) {
		service_name_tlv = nfc_llcp_build_tlv(LLCP_TLV_SN,
						      sock->service_name,
						      sock->service_name_len,
						      &service_name_tlv_length);
		if (!service_name_tlv) {
			err = -ENOMEM;
			goto error_tlv;
		}
		size += service_name_tlv_length;
	}

	/* If the socket parameters are not set, use the local ones */
	miux = be16_to_cpu(sock->miux) > LLCP_MAX_MIUX ?
		local->miux : sock->miux;
	rw = sock->rw > LLCP_MAX_RW ? local->rw : sock->rw;

	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
				      &miux_tlv_length);
	if (!miux_tlv) {
		err = -ENOMEM;
		goto error_tlv;
	}
	size += miux_tlv_length;

	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
	if (!rw_tlv) {
		err = -ENOMEM;
		goto error_tlv;
	}
	size += rw_tlv_length;

	pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);

	skb = llcp_allocate_pdu(sock, LLCP_PDU_CONNECT, size);
	if (skb == NULL) {
		err = -ENOMEM;
		goto error_tlv;
	}

	llcp_add_tlv(skb, service_name_tlv, service_name_tlv_length);
	llcp_add_tlv(skb, miux_tlv, miux_tlv_length);
	llcp_add_tlv(skb, rw_tlv, rw_tlv_length);

	skb_queue_tail(&local->tx_queue, skb);

	err = 0;

error_tlv:
	if (err)
		pr_err("error %d\n", err);

	kfree(service_name_tlv);
	kfree(miux_tlv);
	kfree(rw_tlv);

	return err;
}
/* Build and queue a CC (connection complete) PDU with the MIUX and RW
 * parameter TLVs. TLV buffers are freed on every path. Returns 0,
 * -ENODEV or -ENOMEM.
 */
int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
{
	struct nfc_llcp_local *local;
	struct sk_buff *skb;
	u8 *miux_tlv = NULL, miux_tlv_length;
	u8 *rw_tlv = NULL, rw_tlv_length, rw;
	int err;
	u16 size = 0;
	__be16 miux;

	pr_debug("Sending CC\n");

	local = sock->local;
	if (local == NULL)
		return -ENODEV;

	/* If the socket parameters are not set, use the local ones */
	miux = be16_to_cpu(sock->miux) > LLCP_MAX_MIUX ?
		local->miux : sock->miux;
	rw = sock->rw > LLCP_MAX_RW ? local->rw : sock->rw;

	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
				      &miux_tlv_length);
	if (!miux_tlv) {
		err = -ENOMEM;
		goto error_tlv;
	}
	size += miux_tlv_length;

	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
	if (!rw_tlv) {
		err = -ENOMEM;
		goto error_tlv;
	}
	size += rw_tlv_length;

	skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
	if (skb == NULL) {
		err = -ENOMEM;
		goto error_tlv;
	}

	llcp_add_tlv(skb, miux_tlv, miux_tlv_length);
	llcp_add_tlv(skb, rw_tlv, rw_tlv_length);

	skb_queue_tail(&local->tx_queue, skb);

	err = 0;

error_tlv:
	if (err)
		pr_err("error %d\n", err);

	kfree(miux_tlv);
	kfree(rw_tlv);

	return err;
}
/* Allocate an SNL (service name lookup) PDU skb with room for @tlv_length
 * bytes of TLVs and write its header (SDP SAP for both endpoints).
 * Returns an skb or an ERR_PTR (-ENODEV/-ENOMEM) — callers must use IS_ERR().
 */
static struct sk_buff *nfc_llcp_allocate_snl(struct nfc_llcp_local *local,
					     size_t tlv_length)
{
	struct sk_buff *skb;
	struct nfc_dev *dev;
	u16 size = 0;

	if (local == NULL)
		return ERR_PTR(-ENODEV);

	dev = local->dev;
	if (dev == NULL)
		return ERR_PTR(-ENODEV);

	size += LLCP_HEADER_SIZE;
	size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
	size += tlv_length;

	skb = alloc_skb(size, GFP_KERNEL);
	if (skb == NULL)
		return ERR_PTR(-ENOMEM);

	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);

	skb = llcp_add_header(skb, LLCP_SAP_SDP, LLCP_SAP_SDP, LLCP_PDU_SNL);

	return skb;
}
/* Queue an SNL PDU carrying the SDRES TLVs on @tlv_list; the list entries
 * are consumed (unlinked and freed) in the process. @tlvs_len must be the
 * total byte size of all TLVs on the list.
 */
int nfc_llcp_send_snl_sdres(struct nfc_llcp_local *local,
			    struct hlist_head *tlv_list, size_t tlvs_len)
{
	struct nfc_llcp_sdp_tlv *sdp;
	struct hlist_node *n;
	struct sk_buff *skb;

	skb = nfc_llcp_allocate_snl(local, tlvs_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	hlist_for_each_entry_safe(sdp, n, tlv_list, node) {
		skb_put_data(skb, sdp->tlv, sdp->tlv_len);

		hlist_del(&sdp->node);

		nfc_llcp_free_sdp_tlv(sdp);
	}

	skb_queue_tail(&local->tx_queue, skb);

	return 0;
}
/* Queue an SNL PDU carrying the SDREQ TLVs on @tlv_list. Unlike the SDRES
 * path, the entries are moved to local->pending_sdreqs (awaiting the peer's
 * response) rather than freed. Arms the sdreq timeout timer when the
 * pending list was previously empty.
 */
int nfc_llcp_send_snl_sdreq(struct nfc_llcp_local *local,
			    struct hlist_head *tlv_list, size_t tlvs_len)
{
	struct nfc_llcp_sdp_tlv *sdreq;
	struct hlist_node *n;
	struct sk_buff *skb;

	skb = nfc_llcp_allocate_snl(local, tlvs_len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	mutex_lock(&local->sdreq_lock);

	if (hlist_empty(&local->pending_sdreqs))
		mod_timer(&local->sdreq_timer,
			  jiffies + msecs_to_jiffies(3 * local->remote_lto));

	hlist_for_each_entry_safe(sdreq, n, tlv_list, node) {
		pr_debug("tid %d for %s\n", sdreq->tid, sdreq->uri);

		skb_put_data(skb, sdreq->tlv, sdreq->tlv_len);

		hlist_del(&sdreq->node);

		hlist_add_head(&sdreq->node, &local->pending_sdreqs);
	}

	mutex_unlock(&local->sdreq_lock);

	skb_queue_tail(&local->tx_queue, skb);

	return 0;
}
/* Build a DM (disconnected mode) PDU with a one-byte @reason and queue it
 * at the HEAD of the tx queue so rejections go out before pending data.
 */
int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
{
	struct sk_buff *skb;
	struct nfc_dev *dev;
	u16 size = 1; /* Reason code */

	pr_debug("Sending DM reason 0x%x\n", reason);

	if (local == NULL)
		return -ENODEV;

	dev = local->dev;
	if (dev == NULL)
		return -ENODEV;

	size += LLCP_HEADER_SIZE;
	size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;

	skb = alloc_skb(size, GFP_KERNEL);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);

	skb = llcp_add_header(skb, dsap, ssap, LLCP_PDU_DM);

	skb_put_data(skb, &reason, 1);

	skb_queue_head(&local->tx_queue, skb);

	return 0;
}
/* Fragment @msg into I (information) PDUs of at most the remote MIU and
 * queue them on the socket tx queue, pushing ready frames via
 * nfc_llcp_queue_i_frames() under the socket lock. Applies simple
 * backpressure based on the remote receive window. Returns the number of
 * bytes consumed or a negative errno.
 */
int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
			  struct msghdr *msg, size_t len)
{
	struct sk_buff *pdu;
	struct sock *sk = &sock->sk;
	struct nfc_llcp_local *local;
	size_t frag_len = 0, remaining_len;
	u8 *msg_data, *msg_ptr;
	u16 remote_miu;

	pr_debug("Send I frame len %zd\n", len);

	local = sock->local;
	if (local == NULL)
		return -ENODEV;

	/* Remote is ready but has not acknowledged our frames */
	if((sock->remote_ready &&
	    skb_queue_len(&sock->tx_pending_queue) >= sock->remote_rw &&
	    skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) {
		pr_err("Pending queue is full %d frames\n",
		       skb_queue_len(&sock->tx_pending_queue));
		return -ENOBUFS;
	}

	/* Remote is not ready and we've been queueing enough frames */
	if ((!sock->remote_ready &&
	     skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) {
		pr_err("Tx queue is full %d frames\n",
		       skb_queue_len(&sock->tx_queue));
		return -ENOBUFS;
	}

	/* Copy the whole payload out of the msghdr once, then fragment. */
	msg_data = kmalloc(len, GFP_USER | __GFP_NOWARN);
	if (msg_data == NULL)
		return -ENOMEM;

	if (memcpy_from_msg(msg_data, msg, len)) {
		kfree(msg_data);
		return -EFAULT;
	}

	remaining_len = len;
	msg_ptr = msg_data;

	do {
		/* Clamp an out-of-range advertised MIU to the default. */
		remote_miu = sock->remote_miu > LLCP_MAX_MIU ?
				LLCP_DEFAULT_MIU : sock->remote_miu;

		frag_len = min_t(size_t, remote_miu, remaining_len);

		pr_debug("Fragment %zd bytes remaining %zd",
			 frag_len, remaining_len);

		pdu = llcp_allocate_pdu(sock, LLCP_PDU_I,
					frag_len + LLCP_SEQUENCE_SIZE);
		if (pdu == NULL) {
			kfree(msg_data);
			return -ENOMEM;
		}

		skb_put(pdu, LLCP_SEQUENCE_SIZE);

		if (likely(frag_len > 0))
			skb_put_data(pdu, msg_ptr, frag_len);

		skb_queue_tail(&sock->tx_queue, pdu);

		lock_sock(sk);

		nfc_llcp_queue_i_frames(sock);

		release_sock(sk);

		remaining_len -= frag_len;
		msg_ptr += frag_len;
	} while (remaining_len > 0);

	kfree(msg_data);

	return len;
}
/* Fragment @msg into connectionless UI PDUs and queue them on the local
 * tx queue. No receive-window accounting applies to UI frames. Returns the
 * number of bytes queued, or a negative errno if nothing could be sent.
 */
int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
			   struct msghdr *msg, size_t len)
{
	struct sk_buff *pdu;
	struct nfc_llcp_local *local;
	size_t frag_len = 0, remaining_len;
	u8 *msg_ptr, *msg_data;
	u16 remote_miu;
	int err;

	pr_debug("Send UI frame len %zd\n", len);

	local = sock->local;
	if (local == NULL)
		return -ENODEV;

	msg_data = kmalloc(len, GFP_USER | __GFP_NOWARN);
	if (msg_data == NULL)
		return -ENOMEM;

	if (memcpy_from_msg(msg_data, msg, len)) {
		kfree(msg_data);
		return -EFAULT;
	}

	remaining_len = len;
	msg_ptr = msg_data;

	do {
		/* NOTE(review): out-of-range MIU falls back to
		 * local->remote_miu here, while the I-frame path falls back
		 * to LLCP_DEFAULT_MIU — confirm this asymmetry is intended.
		 */
		remote_miu = sock->remote_miu > LLCP_MAX_MIU ?
				local->remote_miu : sock->remote_miu;

		frag_len = min_t(size_t, remote_miu, remaining_len);

		pr_debug("Fragment %zd bytes remaining %zd",
			 frag_len, remaining_len);

		pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
					 frag_len + LLCP_HEADER_SIZE, &err);
		if (pdu == NULL) {
			pr_err("Could not allocate PDU (error=%d)\n", err);
			/* Report partial progress, or the error if none. */
			len -= remaining_len;
			if (len == 0)
				len = err;
			break;
		}

		pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);

		if (likely(frag_len > 0))
			skb_put_data(pdu, msg_ptr, frag_len);

		/* No need to check for the peer RW for UI frames */
		skb_queue_tail(&local->tx_queue, pdu);

		remaining_len -= frag_len;
		msg_ptr += frag_len;
	} while (remaining_len > 0);

	kfree(msg_data);

	return len;
}
/* Build an RR (receive ready) acknowledgment carrying the socket's current
 * receive sequence number and queue it at the head of the tx queue.
 */
int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
{
	struct sk_buff *skb;
	struct nfc_llcp_local *local;

	pr_debug("Send rr nr %d\n", sock->recv_n);

	local = sock->local;
	if (local == NULL)
		return -ENODEV;

	skb = llcp_allocate_pdu(sock, LLCP_PDU_RR, LLCP_SEQUENCE_SIZE);
	if (skb == NULL)
		return -ENOMEM;

	skb_put(skb, LLCP_SEQUENCE_SIZE);

	/* Byte 2 (after the LLCP header) holds N(R). */
	skb->data[2] = sock->recv_n;

	skb_queue_head(&local->tx_queue, skb);

	return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_887_0 |
crossvul-cpp_data_bad_3256_1 | /*
* This contains functions for filename crypto management
*
* Copyright (C) 2015, Google, Inc.
* Copyright (C) 2015, Motorola Mobility
*
* Written by Uday Savagaonkar, 2014.
* Modified by Jaegeuk Kim, 2015.
*
* This has not yet undergone a rigorous security audit.
*/
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include "fscrypt_private.h"
/**
 * fname_crypt_complete() - completion callback for filename crypto
 * @req: The asynchronous cipher request context
 * @res: The result of the cipher operation
 *
 * Stores the result and wakes the waiter, except for the intermediate
 * -EINPROGRESS notification which must be ignored.
 */
static void fname_crypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}
/**
 * fname_encrypt() - encrypt a filename
 * @inode: inode whose filename-encryption key (i_crypt_info, set by the
 *         caller) is used
 * @iname: plaintext name
 * @oname: output; encrypted in place after NUL-padding the plaintext up to
 *         the policy's padding granularity, capped at the filesystem's
 *         maximum name length
 *
 * The caller must have allocated sufficient memory for the @oname string.
 *
 * Return: 0 on success, -errno on failure
 */
static int fname_encrypt(struct inode *inode,
			const struct qstr *iname, struct fscrypt_str *oname)
{
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;
	char iv[FS_CRYPTO_BLOCK_SIZE];
	struct scatterlist sg;
	int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
	unsigned int lim;
	unsigned int cryptlen;

	lim = inode->i_sb->s_cop->max_namelen(inode);
	if (iname->len <= 0 || iname->len > lim)
		return -EIO;

	/*
	 * Copy the filename to the output buffer for encrypting in-place and
	 * pad it with the needed number of NUL bytes.
	 */
	cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE);
	cryptlen = round_up(cryptlen, padding);
	cryptlen = min(cryptlen, lim);
	memcpy(oname->name, iname->name, iname->len);
	memset(oname->name + iname->len, 0, cryptlen - iname->len);

	/* Initialize the IV */
	memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);

	/* Set up the encryption request */
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
			"%s: skcipher_request_alloc() failed\n", __func__);
		return -ENOMEM;
	}
	skcipher_request_set_callback(req,
			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			fname_crypt_complete, &ecr);
	sg_init_one(&sg, oname->name, cryptlen);
	skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv);

	/* Do the encryption */
	res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		/* Request is being completed asynchronously; wait for it */
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}

	skcipher_request_free(req);
	if (res < 0) {
		printk_ratelimited(KERN_ERR
				"%s: Error (error code %d)\n", __func__, res);
		return res;
	}

	oname->len = cryptlen;
	return 0;
}
/**
 * fname_decrypt() - decrypt a filename
 * @inode: inode whose filename-encryption key (i_crypt_info, set by the
 *         caller) is used
 * @iname: ciphertext name
 * @oname: output; the length is set to the decrypted string's length
 *         (terminated at the first NUL pad byte)
 *
 * The caller must have allocated sufficient memory for the @oname string.
 *
 * Return: 0 on success, -errno on failure
 */
static int fname_decrypt(struct inode *inode,
				const struct fscrypt_str *iname,
				struct fscrypt_str *oname)
{
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist src_sg, dst_sg;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;
	char iv[FS_CRYPTO_BLOCK_SIZE];
	unsigned lim;

	lim = inode->i_sb->s_cop->max_namelen(inode);
	if (iname->len <= 0 || iname->len > lim)
		return -EIO;

	/* Allocate request */
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_request_alloc() failed\n",  __func__);
		return -ENOMEM;
	}
	skcipher_request_set_callback(req,
		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		fname_crypt_complete, &ecr);

	/* Initialize IV */
	memset(iv, 0, FS_CRYPTO_BLOCK_SIZE);

	/* Create decryption request */
	sg_init_one(&src_sg, iname->name, iname->len);
	sg_init_one(&dst_sg, oname->name, oname->len);
	skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
	res = crypto_skcipher_decrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res < 0) {
		printk_ratelimited(KERN_ERR
				"%s: Error (error code %d)\n", __func__, res);
		return res;
	}

	/* Trim the NUL padding appended by fname_encrypt(). */
	oname->len = strnlen(oname->name, iname->len);
	return 0;
}
/* Custom base64-style alphabet; 64 filesystem-safe characters. */
static const char *lookup_table =
	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";

/**
 * digest_encode() -
 *
 * Encodes the input digest using characters from the set [a-zA-Z0-9_+].
 * The encoded string is roughly 4/3 times the size of the input string.
 * Bits are consumed least-significant first, 6 at a time; any leftover
 * bits produce one final character. Returns the encoded length (the
 * output is NOT NUL-terminated).
 */
static int digest_encode(const char *src, int len, char *dst)
{
	int acc = 0, nbits = 0, pos;
	char *out = dst;

	for (pos = 0; pos < len; pos++) {
		acc += ((unsigned char) src[pos]) << nbits;
		nbits += 8;
		while (nbits >= 6) {
			*out++ = lookup_table[acc & 0x3f];
			acc >>= 6;
			nbits -= 6;
		}
	}
	if (nbits > 0)
		*out++ = lookup_table[acc & 0x3f];
	return (int)(out - dst);
}

/* Inverse of digest_encode(). Returns the decoded length, -2 on a
 * character outside the alphabet (including an embedded NUL, which
 * strchr() would otherwise match against the terminator), or -1 when
 * the trailing bits are not all zero.
 */
static int digest_decode(const char *src, int len, char *dst)
{
	int acc = 0, nbits = 0, pos;
	const char *match;
	char *out = dst;

	for (pos = 0; pos < len; pos++) {
		match = strchr(lookup_table, src[pos]);
		if (match == NULL || src[pos] == 0)
			return -2;
		acc += (int)(match - lookup_table) << nbits;
		nbits += 6;
		if (nbits >= 8) {
			*out++ = acc & 0xff;
			acc >>= 8;
			nbits -= 8;
		}
	}
	if (acc != 0)
		return -1;
	return (int)(out - dst);
}
/**
 * fscrypt_fname_encrypted_size() - ciphertext size for a plaintext length
 * @inode: inode the name belongs to (its crypt info supplies the padding
 *         policy when available; otherwise the maximum padding of 32 is
 *         assumed)
 * @ilen: plaintext name length in bytes
 *
 * Rounds @ilen up to the padding granularity, with a floor of one cipher
 * block.
 */
u32 fscrypt_fname_encrypted_size(const struct inode *inode, u32 ilen)
{
	int padding = 32;
	struct fscrypt_info *ci = inode->i_crypt_info;

	if (ci)
		padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK);
	ilen = max(ilen, (u32)FS_CRYPTO_BLOCK_SIZE);
	return round_up(ilen, padding);
}
EXPORT_SYMBOL(fscrypt_fname_encrypted_size);
/**
 * fscrypt_fname_crypto_alloc_obuff() -
 *
 * Allocates an output buffer that is sufficient for the crypto operation
 * specified by the context and the direction. The buffer is sized for the
 * encrypted form of an @ilen-byte name, with a floor large enough for the
 * base64-style digest presentation, plus one byte for NUL termination.
 * Free with fscrypt_fname_free_buffer().
 */
int fscrypt_fname_alloc_buffer(const struct inode *inode,
				u32 ilen, struct fscrypt_str *crypto_str)
{
	unsigned int olen = fscrypt_fname_encrypted_size(inode, ilen);

	crypto_str->len = olen;
	if (olen < FS_FNAME_CRYPTO_DIGEST_SIZE * 2)
		olen = FS_FNAME_CRYPTO_DIGEST_SIZE * 2;
	/*
	 * Allocated buffer can hold one more character to null-terminate the
	 * string
	 */
	crypto_str->name = kmalloc(olen + 1, GFP_NOFS);
	if (!(crypto_str->name))
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(fscrypt_fname_alloc_buffer);
/**
 * fscrypt_fname_crypto_free_buffer() -
 *
 * Frees the buffer allocated for crypto operation. Tolerates a NULL
 * @crypto_str, and clears the name pointer so a double free is harmless.
 */
void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
{
	if (!crypto_str)
		return;
	kfree(crypto_str->name);
	crypto_str->name = NULL;
}
EXPORT_SYMBOL(fscrypt_fname_free_buffer);
/**
 * fscrypt_fname_disk_to_usr() - converts a filename from disk space to user
 * space
 *
 * "." and ".." are copied through verbatim. With the key present the name
 * is decrypted; without it a no-key presentation is produced: short names
 * are digest-encoded whole, long names become '_' followed by the encoded
 * (hash, minor_hash, last 16 ciphertext bytes) tuple so lookups can still
 * locate the entry.
 *
 * The caller must have allocated sufficient memory for the @oname string.
 *
 * Return: 0 on success, -errno on failure
 */
int fscrypt_fname_disk_to_usr(struct inode *inode,
			u32 hash, u32 minor_hash,
			const struct fscrypt_str *iname,
			struct fscrypt_str *oname)
{
	const struct qstr qname = FSTR_TO_QSTR(iname);
	char buf[24];

	if (fscrypt_is_dot_dotdot(&qname)) {
		oname->name[0] = '.';
		oname->name[iname->len - 1] = '.';
		oname->len = iname->len;
		return 0;
	}

	/* Valid ciphertext is at least one cipher block long. */
	if (iname->len < FS_CRYPTO_BLOCK_SIZE)
		return -EUCLEAN;

	if (inode->i_crypt_info)
		return fname_decrypt(inode, iname, oname);

	if (iname->len <= FS_FNAME_CRYPTO_DIGEST_SIZE) {
		oname->len = digest_encode(iname->name, iname->len,
					   oname->name);
		return 0;
	}
	if (hash) {
		memcpy(buf, &hash, 4);
		memcpy(buf + 4, &minor_hash, 4);
	} else {
		memset(buf, 0, 8);
	}
	memcpy(buf + 8, iname->name + iname->len - 16, 16);
	oname->name[0] = '_';
	oname->len = 1 + digest_encode(buf, 24, oname->name + 1);
	return 0;
}
EXPORT_SYMBOL(fscrypt_fname_disk_to_usr);
/**
 * fscrypt_fname_usr_to_disk() - converts a filename from user space to disk
 * space
 *
 * "." and ".." are copied through verbatim; other names require the key
 * and are encrypted into @oname.
 *
 * The caller must have allocated sufficient memory for the @oname string.
 *
 * Return: 0 on success, -errno on failure
 */
int fscrypt_fname_usr_to_disk(struct inode *inode,
			const struct qstr *iname,
			struct fscrypt_str *oname)
{
	if (fscrypt_is_dot_dotdot(iname)) {
		oname->name[0] = '.';
		oname->name[iname->len - 1] = '.';
		oname->len = iname->len;
		return 0;
	}
	if (inode->i_crypt_info)
		return fname_encrypt(inode, iname, oname);
	/*
	 * Without a proper key, a user is not allowed to modify the filenames
	 * in a directory. Consequently, a user space name cannot be mapped to
	 * a disk-space name
	 */
	return -ENOKEY;
}
EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);
/* Prepare @fname for a directory operation on @iname inside @dir.
 * Unencrypted dirs and "."/".." pass the name through untouched. With the
 * key, the name is encrypted into crypto_buf. Without the key, only
 * lookups are allowed: the user-supplied no-key name (see
 * fscrypt_fname_disk_to_usr()) is decoded back into either a hash pair
 * (bigname, '_'-prefixed, exactly 33 chars) or raw ciphertext (<= 43
 * chars, decoding to at most 32 bytes — hence the fixed allocation).
 * On success the caller must release @fname with fscrypt_free_filename().
 */
int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
			      int lookup, struct fscrypt_name *fname)
{
	int ret = 0, bigname = 0;

	memset(fname, 0, sizeof(struct fscrypt_name));
	fname->usr_fname = iname;

	if (!dir->i_sb->s_cop->is_encrypted(dir) ||
				fscrypt_is_dot_dotdot(iname)) {
		fname->disk_name.name = (unsigned char *)iname->name;
		fname->disk_name.len = iname->len;
		return 0;
	}
	ret = fscrypt_get_crypt_info(dir);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	if (dir->i_crypt_info) {
		ret = fscrypt_fname_alloc_buffer(dir, iname->len,
						 &fname->crypto_buf);
		if (ret)
			return ret;
		ret = fname_encrypt(dir, iname, &fname->crypto_buf);
		if (ret)
			goto errout;
		fname->disk_name.name = fname->crypto_buf.name;
		fname->disk_name.len = fname->crypto_buf.len;
		return 0;
	}
	if (!lookup)
		return -ENOKEY;

	/*
	 * We don't have the key and we are doing a lookup; decode the
	 * user-supplied name
	 */
	if (iname->name[0] == '_')
		bigname = 1;
	if ((bigname && (iname->len != 33)) || (!bigname && (iname->len > 43)))
		return -ENOENT;

	fname->crypto_buf.name = kmalloc(32, GFP_KERNEL);
	if (fname->crypto_buf.name == NULL)
		return -ENOMEM;

	ret = digest_decode(iname->name + bigname, iname->len - bigname,
				fname->crypto_buf.name);
	if (ret < 0) {
		ret = -ENOENT;
		goto errout;
	}
	fname->crypto_buf.len = ret;
	if (bigname) {
		/* First 8 decoded bytes are the (hash, minor_hash) pair. */
		memcpy(&fname->hash, fname->crypto_buf.name, 4);
		memcpy(&fname->minor_hash, fname->crypto_buf.name + 4, 4);
	} else {
		fname->disk_name.name = fname->crypto_buf.name;
		fname->disk_name.len = fname->crypto_buf.len;
	}
	return 0;

errout:
	fscrypt_fname_free_buffer(&fname->crypto_buf);
	return ret;
}
EXPORT_SYMBOL(fscrypt_setup_filename);
/* Release the crypto buffer of a name set up by fscrypt_setup_filename()
 * and clear the borrowed pointers (usr_fname/disk_name are never owned).
 */
void fscrypt_free_filename(struct fscrypt_name *fname)
{
	kfree(fname->crypto_buf.name);
	fname->crypto_buf.name = NULL;
	fname->usr_fname = NULL;
	fname->disk_name.name = NULL;
}
EXPORT_SYMBOL(fscrypt_free_filename);
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3256_1 |
crossvul-cpp_data_good_4032_0 | /* Copyright JS Foundation and other contributors, http://js.foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "jcontext.h"
#include "js-parser-internal.h"
#include "js-scanner-internal.h"
#include "lit-char-helpers.h"
#if ENABLED (JERRY_PARSER)
/** \addtogroup parser Parser
* @{
*
* \addtogroup jsparser JavaScript
* @{
*
* \addtogroup jsparser_scanner Scanner
* @{
*/
/**
 * Scan return types.
 */
typedef enum
{
  SCAN_NEXT_TOKEN, /**< get next token after return */
  SCAN_KEEP_TOKEN, /**< keep the current token after return */
} scan_return_types_t;

/**
 * Checks whether token type is "of".
 */
#if ENABLED (JERRY_ES2015)
#define SCANNER_IDENTIFIER_IS_OF() (lexer_token_is_identifier (context_p, "of", 2))
#else
#define SCANNER_IDENTIFIER_IS_OF() (false)
#endif /* ENABLED (JERRY_ES2015) */

#if ENABLED (JERRY_ES2015)

/* The SCANNER_FROM_LITERAL_POOL_TO_COMPUTED / SCANNER_FROM_COMPUTED_TO_LITERAL_POOL
 * macros rely on a fixed numeric distance between the literal pool status flags
 * and the corresponding computed-property stack modes; these compile-time asserts
 * catch any enum reshuffling that would silently break that mapping. */
JERRY_STATIC_ASSERT (SCANNER_FROM_LITERAL_POOL_TO_COMPUTED (SCANNER_LITERAL_POOL_GENERATOR)
                     == SCAN_STACK_COMPUTED_GENERATOR,
                     scanner_invalid_conversion_from_literal_pool_generator_to_computed_generator);

JERRY_STATIC_ASSERT (SCANNER_FROM_LITERAL_POOL_TO_COMPUTED (SCANNER_LITERAL_POOL_ASYNC)
                     == SCAN_STACK_COMPUTED_ASYNC,
                     scanner_invalid_conversion_from_literal_pool_async_to_computed_async);

JERRY_STATIC_ASSERT (SCANNER_FROM_COMPUTED_TO_LITERAL_POOL (SCAN_STACK_COMPUTED_GENERATOR)
                     == SCANNER_LITERAL_POOL_GENERATOR,
                     scanner_invalid_conversion_from_computed_generator_to_literal_pool_generator);

JERRY_STATIC_ASSERT (SCANNER_FROM_COMPUTED_TO_LITERAL_POOL (SCAN_STACK_COMPUTED_ASYNC)
                     == SCANNER_LITERAL_POOL_ASYNC,
                     scanner_invalid_conversion_from_computed_async_to_literal_pool_async);

#endif /* ENABLED (JERRY_ES2015) */
/**
 * Scan primary expression.
 *
 * @return SCAN_NEXT_TOKEN to read the next token, or SCAN_KEEP_TOKEN to do nothing
 */
static scan_return_types_t
scanner_scan_primary_expression (parser_context_t *context_p, /**< context */
                                 scanner_context_t *scanner_context_p, /**< scanner context */
                                 lexer_token_type_t type, /**< current token type */
                                 scan_stack_modes_t stack_top) /**< current stack top */
{
  switch (type)
  {
    case LEXER_KEYW_NEW:
    {
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION_AFTER_NEW;
#if ENABLED (JERRY_ES2015)
      /* "new.target" is a complete expression on its own. */
      if (scanner_try_scan_new_target (context_p))
      {
        scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
      }
#endif /* ENABLED (JERRY_ES2015) */
      break;
    }
    case LEXER_DIVIDE:
    case LEXER_ASSIGN_DIVIDE:
    {
      /* In expression position a '/' starts a regexp literal. */
      lexer_construct_regexp_object (context_p, true);
      scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
      break;
    }
    case LEXER_KEYW_FUNCTION:
    {
      uint16_t status_flags = SCANNER_LITERAL_POOL_FUNCTION;
#if ENABLED (JERRY_ES2015)
      if (scanner_context_p->async_source_p != NULL)
      {
        status_flags |= SCANNER_LITERAL_POOL_ASYNC;
      }
      if (lexer_consume_generator (context_p))
      {
        status_flags |= SCANNER_LITERAL_POOL_GENERATOR;
      }
#endif /* ENABLED (JERRY_ES2015) */
      scanner_push_literal_pool (context_p, scanner_context_p, status_flags);
      lexer_next_token (context_p);
      /* Skip the optional function expression name. */
      if (context_p->token.type == LEXER_LITERAL
          && context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
      {
        lexer_next_token (context_p);
      }
      parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_EXPRESSION);
      scanner_context_p->mode = SCAN_MODE_FUNCTION_ARGUMENTS;
      return SCAN_KEEP_TOKEN;
    }
    case LEXER_LEFT_PAREN:
    {
      scanner_scan_bracket (context_p, scanner_context_p);
      return SCAN_KEEP_TOKEN;
    }
    case LEXER_LEFT_SQUARE:
    {
#if ENABLED (JERRY_ES2015)
      scanner_push_destructuring_pattern (context_p, scanner_context_p, SCANNER_BINDING_NONE, false);
#endif /* ENABLED (JERRY_ES2015) */
      parser_stack_push_uint8 (context_p, SCAN_STACK_ARRAY_LITERAL);
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
      break;
    }
    case LEXER_LEFT_BRACE:
    {
#if ENABLED (JERRY_ES2015)
      scanner_push_destructuring_pattern (context_p, scanner_context_p, SCANNER_BINDING_NONE, false);
#endif /* ENABLED (JERRY_ES2015) */
      parser_stack_push_uint8 (context_p, SCAN_STACK_OBJECT_LITERAL);
      scanner_context_p->mode = SCAN_MODE_PROPERTY_NAME;
      return SCAN_KEEP_TOKEN;
    }
#if ENABLED (JERRY_ES2015)
    case LEXER_TEMPLATE_LITERAL:
    {
      /* A template with substitutions: scan the first embedded expression. */
      if (context_p->source_p[-1] != LIT_CHAR_GRAVE_ACCENT)
      {
        parser_stack_push_uint8 (context_p, SCAN_STACK_TEMPLATE_STRING);
        scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
        break;
      }

      /* The string is a normal string literal. */
      /* FALLTHRU */
    }
#endif /* ENABLED (JERRY_ES2015) */
    case LEXER_LITERAL:
    {
#if ENABLED (JERRY_ES2015)
      const uint8_t *source_p = context_p->source_p;

      if (context_p->token.lit_location.type == LEXER_IDENT_LITERAL
          && lexer_check_arrow (context_p))
      {
        /* "ident =>" is a single-parameter arrow function. */
        scanner_scan_simple_arrow (context_p, scanner_context_p, source_p);
        return SCAN_KEEP_TOKEN;
      }
      else if (JERRY_UNLIKELY (lexer_token_is_async (context_p)))
      {
        scanner_context_p->async_source_p = source_p;
        scanner_check_async_function (context_p, scanner_context_p);
        return SCAN_KEEP_TOKEN;
      }
#endif /* ENABLED (JERRY_ES2015) */

      if (context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
      {
        scanner_add_reference (context_p, scanner_context_p);
      }
      /* FALLTHRU */
    }
    case LEXER_KEYW_THIS:
    case LEXER_KEYW_SUPER:
    case LEXER_LIT_TRUE:
    case LEXER_LIT_FALSE:
    case LEXER_LIT_NULL:
    {
      scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
      break;
    }
#if ENABLED (JERRY_ES2015)
    case LEXER_KEYW_CLASS:
    {
      scanner_push_class_declaration (context_p, scanner_context_p, SCAN_STACK_CLASS_EXPRESSION);

      if (context_p->token.type != LEXER_LITERAL || context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
      {
        return SCAN_KEEP_TOKEN;
      }
      break;
    }
#endif /* ENABLED (JERRY_ES2015) */
    case LEXER_RIGHT_SQUARE:
    {
      /* Only valid as the end of an (possibly empty) array literal. */
      if (stack_top != SCAN_STACK_ARRAY_LITERAL)
      {
        scanner_raise_error (context_p);
      }
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION_END;
      return SCAN_KEEP_TOKEN;
    }
#if ENABLED (JERRY_ES2015)
    case LEXER_THREE_DOTS:
    {
      /* Elision or spread arguments */
      if (stack_top != SCAN_STACK_PAREN_EXPRESSION && stack_top != SCAN_STACK_ARRAY_LITERAL)
      {
        scanner_raise_error (context_p);
      }
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
      break;
    }
#endif /* ENABLED (JERRY_ES2015) */
    case LEXER_COMMA:
    {
      /* An elided array element. */
      if (stack_top != SCAN_STACK_ARRAY_LITERAL)
      {
        scanner_raise_error (context_p);
      }
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
#if ENABLED (JERRY_ES2015)
      if (scanner_context_p->binding_type != SCANNER_BINDING_NONE)
      {
        scanner_context_p->mode = SCAN_MODE_BINDING;
      }
#endif /* ENABLED (JERRY_ES2015) */
      break;
    }
#if ENABLED (JERRY_ES2015)
    case LEXER_KEYW_YIELD:
    {
      lexer_next_token (context_p);

      if (lexer_check_yield_no_arg (context_p))
      {
        scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION_END;
      }

      /* "yield *" delegates: consume the star and scan the operand. */
      if (context_p->token.type == LEXER_MULTIPLY)
      {
        return SCAN_NEXT_TOKEN;
      }
      return SCAN_KEEP_TOKEN;
    }
#endif /* ENABLED (JERRY_ES2015) */
    case LEXER_RIGHT_PAREN:
    {
      /* Only valid as the end of a parenthesized expression. */
      if (stack_top == SCAN_STACK_PAREN_EXPRESSION)
      {
        scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
        parser_stack_pop_uint8 (context_p);
        break;
      }
      /* FALLTHRU */
    }
    default:
    {
      scanner_raise_error (context_p);
    }
  }
  return SCAN_NEXT_TOKEN;
} /* scanner_scan_primary_expression */
/**
 * Scan the tokens after the primary expression.
 *
 * @return true for break, false for fall through
 */
static bool
scanner_scan_post_primary_expression (parser_context_t *context_p, /**< context */
                                      scanner_context_t *scanner_context_p, /**< scanner context */
                                      lexer_token_type_t type, /**< current token type */
                                      scan_stack_modes_t stack_top) /**< current stack top */
{
  switch (type)
  {
    case LEXER_DOT:
    {
      /* Member access: the next token must be an identifier. */
      lexer_scan_identifier (context_p);

      if (context_p->token.type != LEXER_LITERAL
          || context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
      {
        scanner_raise_error (context_p);
      }
      return true;
    }
    case LEXER_LEFT_PAREN:
    {
      /* Call expression: scan the argument list. */
      parser_stack_push_uint8 (context_p, SCAN_STACK_PAREN_EXPRESSION);
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
      return true;
    }
#if ENABLED (JERRY_ES2015)
    case LEXER_TEMPLATE_LITERAL:
    {
      /* Tagged template with substitutions: scan the first expression. */
      if (JERRY_UNLIKELY (context_p->source_p[-1] != LIT_CHAR_GRAVE_ACCENT))
      {
        scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
        parser_stack_push_uint8 (context_p, SCAN_STACK_TAGGED_TEMPLATE_LITERAL);
      }
      return true;
    }
#endif /* ENABLED (JERRY_ES2015) */
    case LEXER_LEFT_SQUARE:
    {
      /* Computed member access. */
      parser_stack_push_uint8 (context_p, SCAN_STACK_PROPERTY_ACCESSOR);
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
      return true;
    }
    case LEXER_INCREASE:
    case LEXER_DECREASE:
    {
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION_END;

      /* A newline before ++/-- makes it a prefix of the next statement. */
      if (context_p->token.flags & LEXER_WAS_NEWLINE)
      {
        return false;
      }

      lexer_next_token (context_p);
      type = (lexer_token_type_t) context_p->token.type;

      if (type != LEXER_QUESTION_MARK)
      {
        break;
      }
      /* FALLTHRU */
    }
    case LEXER_QUESTION_MARK:
    {
      /* Conditional expression: scan the "true" branch first. */
      parser_stack_push_uint8 (context_p, SCAN_STACK_COLON_EXPRESSION);
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
      return true;
    }
    default:
    {
      break;
    }
  }

  /* "in" is excluded inside a for-statement header, where it terminates
   * the initializer instead of acting as a relational operator. */
  if (LEXER_IS_BINARY_OP_TOKEN (type)
      && (type != LEXER_KEYW_IN || !SCANNER_IS_FOR_START (stack_top)))
  {
    scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
    return true;
  }

  return false;
} /* scanner_scan_post_primary_expression */
/**
 * Scan the tokens at the end of a primary expression.
 *
 * Dispatches on the current scan stack top to recognize the construct that
 * the finished expression belongs to (statement header, literal, binding,
 * template, arrow function, etc.) and advances the scanner mode accordingly.
 *
 * @return SCAN_NEXT_TOKEN to read the next token, or SCAN_KEEP_TOKEN to do nothing
 */
static scan_return_types_t
scanner_scan_primary_expression_end (parser_context_t *context_p, /**< context */
                                     scanner_context_t *scanner_context_p, /**< scanner context */
                                     lexer_token_type_t type, /**< current token type */
                                     scan_stack_modes_t stack_top) /**< current stack top */
{
  if (type == LEXER_COMMA)
  {
    /* The meaning of a comma depends on the innermost enclosing construct. */
    switch (stack_top)
    {
      case SCAN_STACK_VAR:
#if ENABLED (JERRY_ES2015)
      case SCAN_STACK_LET:
      case SCAN_STACK_CONST:
#endif /* ENABLED (JERRY_ES2015) */
      case SCAN_STACK_FOR_VAR_START:
#if ENABLED (JERRY_ES2015)
      case SCAN_STACK_FOR_LET_START:
      case SCAN_STACK_FOR_CONST_START:
#endif /* ENABLED (JERRY_ES2015) */
      {
        /* Declaration list: the comma introduces the next declarator. */
        scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
        return SCAN_NEXT_TOKEN;
      }
      case SCAN_STACK_COLON_EXPRESSION:
      {
        /* A conditional expression requires ':' before any comma. */
        scanner_raise_error (context_p);
        break;
      }
#if ENABLED (JERRY_ES2015)
      case SCAN_STACK_BINDING_INIT:
      case SCAN_STACK_BINDING_LIST_INIT:
      {
        /* Handled by the main switch below. */
        break;
      }
      case SCAN_STACK_ARROW_ARGUMENTS:
      {
        /* Next token starts another possible arrow function argument. */
        lexer_next_token (context_p);
        scanner_check_arrow_arg (context_p, scanner_context_p);
        return SCAN_KEEP_TOKEN;
      }
      case SCAN_STACK_ARROW_EXPRESSION:
      {
        break;
      }
      case SCAN_STACK_FUNCTION_PARAMETERS:
      {
        /* The comma separates formal parameters. */
        scanner_context_p->mode = SCAN_MODE_CONTINUE_FUNCTION_ARGUMENTS;
        parser_stack_pop_uint8 (context_p);
        return SCAN_NEXT_TOKEN;
      }
      case SCAN_STACK_ARRAY_LITERAL:
      {
        /* Next array element; in a destructuring context it is a binding. */
        scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
        if (scanner_context_p->binding_type != SCANNER_BINDING_NONE)
        {
          scanner_context_p->mode = SCAN_MODE_BINDING;
        }
        return SCAN_NEXT_TOKEN;
      }
#endif /* ENABLED (JERRY_ES2015) */
      case SCAN_STACK_OBJECT_LITERAL:
      {
        /* Next property of the object literal. */
        scanner_context_p->mode = SCAN_MODE_PROPERTY_NAME;
        return SCAN_KEEP_TOKEN;
      }
      default:
      {
        /* Plain comma operator: scan the next expression. */
        scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
        return SCAN_NEXT_TOKEN;
      }
    }
  }
  switch (stack_top)
  {
    case SCAN_STACK_WITH_EXPRESSION:
    {
      if (type != LEXER_RIGHT_PAREN)
      {
        break;
      }
      parser_stack_pop_uint8 (context_p);
      /* Push the previous in-with flag so it can be restored when the
       * with statement ends, then mark the pool as inside a with. */
      uint16_t status_flags = scanner_context_p->active_literal_pool_p->status_flags;
      parser_stack_push_uint8 (context_p, (status_flags & SCANNER_LITERAL_POOL_IN_WITH) ? 1 : 0);
      parser_stack_push_uint8 (context_p, SCAN_STACK_WITH_STATEMENT);
      status_flags |= SCANNER_LITERAL_POOL_IN_WITH;
      scanner_context_p->active_literal_pool_p->status_flags = status_flags;
      scanner_context_p->mode = SCAN_MODE_STATEMENT;
      return SCAN_NEXT_TOKEN;
    }
    case SCAN_STACK_DO_EXPRESSION:
    {
      /* End of the while (...) condition of a do-while statement. */
      if (type != LEXER_RIGHT_PAREN)
      {
        break;
      }
      scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
      return SCAN_NEXT_TOKEN;
    }
    case SCAN_STACK_WHILE_EXPRESSION:
    {
      if (type != LEXER_RIGHT_PAREN)
      {
        break;
      }
      /* Record the location after the condition so the parser can jump
       * back here when it reaches the loop end. */
      scanner_source_start_t source_start;
      parser_stack_pop_uint8 (context_p);
      parser_stack_pop (context_p, &source_start, sizeof (scanner_source_start_t));
      scanner_location_info_t *location_info_p;
      location_info_p = (scanner_location_info_t *) scanner_insert_info (context_p,
                                                                         source_start.source_p,
                                                                         sizeof (scanner_location_info_t));
      location_info_p->info.type = SCANNER_TYPE_WHILE;
      scanner_get_location (&location_info_p->location, context_p);
      scanner_context_p->mode = SCAN_MODE_STATEMENT;
      return SCAN_NEXT_TOKEN;
    }
    case SCAN_STACK_PAREN_EXPRESSION:
    {
      if (type != LEXER_RIGHT_PAREN)
      {
        break;
      }
      parser_stack_pop_uint8 (context_p);
#if ENABLED (JERRY_ES2015)
      if (context_p->stack_top_uint8 == SCAN_STACK_USE_ASYNC)
      {
        scanner_add_async_literal (context_p, scanner_context_p);
      }
#endif /* ENABLED (JERRY_ES2015) */
      scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
      return SCAN_NEXT_TOKEN;
    }
    case SCAN_STACK_STATEMENT_WITH_EXPR:
    {
      if (type != LEXER_RIGHT_PAREN)
      {
        break;
      }
      parser_stack_pop_uint8 (context_p);
#if ENABLED (JERRY_ES2015)
      if (context_p->stack_top_uint8 == SCAN_STACK_IF_STATEMENT)
      {
        scanner_check_function_after_if (context_p, scanner_context_p);
        return SCAN_KEEP_TOKEN;
      }
#endif /* ENABLED (JERRY_ES2015) */
      scanner_context_p->mode = SCAN_MODE_STATEMENT;
      return SCAN_NEXT_TOKEN;
    }
#if ENABLED (JERRY_ES2015)
    case SCAN_STACK_BINDING_LIST_INIT:
    {
      parser_stack_pop_uint8 (context_p);
      JERRY_ASSERT (context_p->stack_top_uint8 == SCAN_STACK_ARRAY_LITERAL
                    || context_p->stack_top_uint8 == SCAN_STACK_OBJECT_LITERAL
                    || context_p->stack_top_uint8 == SCAN_STACK_LET
                    || context_p->stack_top_uint8 == SCAN_STACK_CONST
                    || context_p->stack_top_uint8 == SCAN_STACK_FOR_LET_START
                    || context_p->stack_top_uint8 == SCAN_STACK_FOR_CONST_START
                    || context_p->stack_top_uint8 == SCAN_STACK_FUNCTION_PARAMETERS
                    || context_p->stack_top_uint8 == SCAN_STACK_ARROW_ARGUMENTS);
      /* Any binding referenced inside its own initializer must be
       * created before the initializer runs. */
      scanner_binding_item_t *item_p = scanner_context_p->active_binding_list_p->items_p;
      while (item_p != NULL)
      {
        if (item_p->literal_p->type & SCANNER_LITERAL_IS_USED)
        {
          item_p->literal_p->type |= SCANNER_LITERAL_EARLY_CREATE;
        }
        item_p = item_p->next_p;
      }
      scanner_pop_binding_list (scanner_context_p);
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION_END;
      return SCAN_KEEP_TOKEN;
    }
    case SCAN_STACK_BINDING_INIT:
    {
      scanner_binding_literal_t binding_literal;
      parser_stack_pop_uint8 (context_p);
      parser_stack_pop (context_p, &binding_literal, sizeof (scanner_binding_literal_t));
      JERRY_ASSERT (context_p->stack_top_uint8 == SCAN_STACK_ARRAY_LITERAL
                    || context_p->stack_top_uint8 == SCAN_STACK_OBJECT_LITERAL
                    || context_p->stack_top_uint8 == SCAN_STACK_LET
                    || context_p->stack_top_uint8 == SCAN_STACK_CONST
                    || context_p->stack_top_uint8 == SCAN_STACK_FOR_LET_START
                    || context_p->stack_top_uint8 == SCAN_STACK_FOR_CONST_START
                    || context_p->stack_top_uint8 == SCAN_STACK_FUNCTION_PARAMETERS
                    || context_p->stack_top_uint8 == SCAN_STACK_ARROW_ARGUMENTS);
      JERRY_ASSERT ((stack_top != SCAN_STACK_ARRAY_LITERAL && stack_top != SCAN_STACK_OBJECT_LITERAL)
                    || SCANNER_NEEDS_BINDING_LIST (scanner_context_p->binding_type));
      /* Same early-create rule as above, for a single binding. */
      if (binding_literal.literal_p->type & SCANNER_LITERAL_IS_USED)
      {
        binding_literal.literal_p->type |= SCANNER_LITERAL_EARLY_CREATE;
      }
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION_END;
      return SCAN_KEEP_TOKEN;
    }
#endif /* ENABLED (JERRY_ES2015) */
    case SCAN_STACK_VAR:
#if ENABLED (JERRY_ES2015)
    case SCAN_STACK_LET:
    case SCAN_STACK_CONST:
#endif /* ENABLED (JERRY_ES2015) */
    {
      /* End of a var/let/const declaration statement. */
#if ENABLED (JERRY_ES2015_MODULE_SYSTEM)
      scanner_context_p->active_literal_pool_p->status_flags &= (uint16_t) ~SCANNER_LITERAL_POOL_IN_EXPORT;
#endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */
      parser_stack_pop_uint8 (context_p);
      return SCAN_KEEP_TOKEN;
    }
    case SCAN_STACK_FOR_VAR_START:
#if ENABLED (JERRY_ES2015)
    case SCAN_STACK_FOR_LET_START:
    case SCAN_STACK_FOR_CONST_START:
#endif /* ENABLED (JERRY_ES2015) */
    case SCAN_STACK_FOR_START:
    {
      /* End of the initializer part of a for statement header:
       * either a for-in / for-of, or the first ';' of a classic for. */
      if (type == LEXER_KEYW_IN || SCANNER_IDENTIFIER_IS_OF ())
      {
        scanner_for_statement_t for_statement;
        parser_stack_pop_uint8 (context_p);
        parser_stack_pop (context_p, &for_statement, sizeof (scanner_for_statement_t));
        scanner_location_info_t *location_info;
        location_info = (scanner_location_info_t *) scanner_insert_info (context_p,
                                                                         for_statement.u.source_p,
                                                                         sizeof (scanner_location_info_t));
#if ENABLED (JERRY_ES2015)
        location_info->info.type = (type == LEXER_KEYW_IN) ? SCANNER_TYPE_FOR_IN : SCANNER_TYPE_FOR_OF;
        if (stack_top == SCAN_STACK_FOR_LET_START || stack_top == SCAN_STACK_FOR_CONST_START)
        {
          parser_stack_push_uint8 (context_p, SCAN_STACK_PRIVATE_BLOCK_EARLY);
        }
#else /* !ENABLED (JERRY_ES2015) */
        location_info->info.type = SCANNER_TYPE_FOR_IN;
#endif /* ENABLED (JERRY_ES2015) */
        scanner_get_location (&location_info->location, context_p);
        parser_stack_push_uint8 (context_p, SCAN_STACK_STATEMENT_WITH_EXPR);
        scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
        return SCAN_NEXT_TOKEN;
      }
      if (type != LEXER_SEMICOLON)
      {
        break;
      }
      scanner_for_statement_t for_statement;
      parser_stack_pop_uint8 (context_p);
      parser_stack_pop (context_p, NULL, sizeof (scanner_for_statement_t));
#if ENABLED (JERRY_ES2015)
      if (stack_top == SCAN_STACK_FOR_LET_START || stack_top == SCAN_STACK_FOR_CONST_START)
      {
        parser_stack_push_uint8 (context_p, SCAN_STACK_PRIVATE_BLOCK);
      }
#endif /* ENABLED (JERRY_ES2015) */
      /* Remember where the condition starts, then scan it. */
      for_statement.u.source_p = context_p->source_p;
      parser_stack_push (context_p, &for_statement, sizeof (scanner_for_statement_t));
      parser_stack_push_uint8 (context_p, SCAN_STACK_FOR_CONDITION);
      lexer_next_token (context_p);
      if (context_p->token.type != LEXER_SEMICOLON)
      {
        scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
        return SCAN_KEEP_TOKEN;
      }
      /* Empty condition: fall through as if the ';' ended it. */
      type = LEXER_SEMICOLON;
      /* FALLTHRU */
    }
    case SCAN_STACK_FOR_CONDITION:
    {
      if (type != LEXER_SEMICOLON)
      {
        break;
      }
      scanner_for_statement_t for_statement;
      parser_stack_pop_uint8 (context_p);
      parser_stack_pop (context_p, &for_statement, sizeof (scanner_for_statement_t));
      scanner_for_info_t *for_info_p;
      for_info_p = (scanner_for_info_t *) scanner_insert_info (context_p,
                                                               for_statement.u.source_p,
                                                               sizeof (scanner_for_info_t));
      for_info_p->info.type = SCANNER_TYPE_FOR;
      scanner_get_location (&for_info_p->expression_location, context_p);
      for_info_p->end_location.source_p = NULL;
      /* Keep the info block on the stack so the update-expression case
       * below can fill in end_location. */
      for_statement.u.for_info_p = for_info_p;
      parser_stack_push (context_p, &for_statement, sizeof (scanner_for_statement_t));
      parser_stack_push_uint8 (context_p, SCAN_STACK_FOR_EXPRESSION);
      lexer_next_token (context_p);
      if (context_p->token.type != LEXER_RIGHT_PAREN)
      {
        scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
        return SCAN_KEEP_TOKEN;
      }
      /* Empty update expression: fall through as if ')' ended it. */
      type = LEXER_RIGHT_PAREN;
      /* FALLTHRU */
    }
    case SCAN_STACK_FOR_EXPRESSION:
    {
      if (type != LEXER_RIGHT_PAREN)
      {
        break;
      }
      scanner_for_statement_t for_statement;
      parser_stack_pop_uint8 (context_p);
      parser_stack_pop (context_p, &for_statement, sizeof (scanner_for_statement_t));
      scanner_get_location (&for_statement.u.for_info_p->end_location, context_p);
      scanner_context_p->mode = SCAN_MODE_STATEMENT;
      return SCAN_NEXT_TOKEN;
    }
    case SCAN_STACK_SWITCH_EXPRESSION:
    {
      if (type != LEXER_RIGHT_PAREN)
      {
        break;
      }
      lexer_next_token (context_p);
      if (context_p->token.type != LEXER_LEFT_BRACE)
      {
        break;
      }
#if ENABLED (JERRY_ES2015)
      /* The switch body forms its own block scope. */
      scanner_literal_pool_t *literal_pool_p;
      literal_pool_p = scanner_push_literal_pool (context_p, scanner_context_p, SCANNER_LITERAL_POOL_BLOCK);
      literal_pool_p->source_p = context_p->source_p - 1;
#endif /* ENABLED (JERRY_ES2015) */
      parser_stack_pop_uint8 (context_p);
      /* Save the enclosing switch's case chain and start a new one. */
      scanner_switch_statement_t switch_statement = scanner_context_p->active_switch_statement;
      parser_stack_push (context_p, &switch_statement, sizeof (scanner_switch_statement_t));
      parser_stack_push_uint8 (context_p, SCAN_STACK_SWITCH_BLOCK);
      scanner_switch_info_t *switch_info_p;
      switch_info_p = (scanner_switch_info_t *) scanner_insert_info (context_p,
                                                                     context_p->source_p,
                                                                     sizeof (scanner_switch_info_t));
      switch_info_p->info.type = SCANNER_TYPE_SWITCH;
      switch_info_p->case_p = NULL;
      scanner_context_p->active_switch_statement.last_case_p = &switch_info_p->case_p;
      lexer_next_token (context_p);
      /* Only '}', 'case' or 'default' may follow the switch brace. */
      if (context_p->token.type != LEXER_RIGHT_BRACE
          && context_p->token.type != LEXER_KEYW_CASE
          && context_p->token.type != LEXER_KEYW_DEFAULT)
      {
        break;
      }
      scanner_context_p->mode = SCAN_MODE_STATEMENT_OR_TERMINATOR;
      return SCAN_KEEP_TOKEN;
    }
    case SCAN_STACK_CASE_STATEMENT:
    {
      if (type != LEXER_COLON)
      {
        break;
      }
      /* Record the location after the case expression for the parser. */
      scanner_source_start_t source_start;
      parser_stack_pop_uint8 (context_p);
      parser_stack_pop (context_p, &source_start, sizeof (scanner_source_start_t));
      scanner_location_info_t *location_info_p;
      location_info_p = (scanner_location_info_t *) scanner_insert_info (context_p,
                                                                         source_start.source_p,
                                                                         sizeof (scanner_location_info_t));
      location_info_p->info.type = SCANNER_TYPE_CASE;
      scanner_get_location (&location_info_p->location, context_p);
      scanner_context_p->mode = SCAN_MODE_STATEMENT_OR_TERMINATOR;
      return SCAN_NEXT_TOKEN;
    }
    case SCAN_STACK_COLON_EXPRESSION:
    {
      /* ':' of a conditional expression: scan the false branch. */
      if (type != LEXER_COLON)
      {
        break;
      }
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
      parser_stack_pop_uint8 (context_p);
      return SCAN_NEXT_TOKEN;
    }
#if ENABLED (JERRY_ES2015)
    case SCAN_STACK_ARRAY_LITERAL:
    case SCAN_STACK_OBJECT_LITERAL:
    {
      if (((stack_top == SCAN_STACK_ARRAY_LITERAL) && (type != LEXER_RIGHT_SQUARE))
          || ((stack_top == SCAN_STACK_OBJECT_LITERAL) && (type != LEXER_RIGHT_BRACE)))
      {
        break;
      }
      /* The literal may be a destructuring pattern; restore the binding
       * type of the enclosing construct from the stack. */
      scanner_source_start_t source_start;
      uint8_t binding_type = scanner_context_p->binding_type;
      parser_stack_pop_uint8 (context_p);
      scanner_context_p->binding_type = context_p->stack_top_uint8;
      parser_stack_pop_uint8 (context_p);
      parser_stack_pop (context_p, &source_start, sizeof (scanner_source_start_t));
      lexer_next_token (context_p);
      if (binding_type == SCANNER_BINDING_CATCH && context_p->stack_top_uint8 == SCAN_STACK_CATCH_STATEMENT)
      {
        /* catch (<pattern>) must be followed by ') {'. */
        scanner_pop_binding_list (scanner_context_p);
        if (context_p->token.type != LEXER_RIGHT_PAREN)
        {
          scanner_raise_error (context_p);
        }
        lexer_next_token (context_p);
        if (context_p->token.type != LEXER_LEFT_BRACE)
        {
          scanner_raise_error (context_p);
        }
        scanner_context_p->mode = SCAN_MODE_STATEMENT_OR_TERMINATOR;
        return SCAN_NEXT_TOKEN;
      }
      if (context_p->token.type != LEXER_ASSIGN)
      {
        if (SCANNER_NEEDS_BINDING_LIST (binding_type))
        {
          scanner_pop_binding_list (scanner_context_p);
        }
        scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
        return SCAN_KEEP_TOKEN;
      }
      /* Pattern with a default initializer: record its location. */
      scanner_location_info_t *location_info_p;
      location_info_p = (scanner_location_info_t *) scanner_insert_info (context_p,
                                                                         source_start.source_p,
                                                                         sizeof (scanner_location_info_t));
      location_info_p->info.type = SCANNER_TYPE_INITIALIZER;
      scanner_get_location (&location_info_p->location, context_p);
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
      if (SCANNER_NEEDS_BINDING_LIST (binding_type))
      {
        /* Reset the used flags; uses inside the initializer are tracked
         * separately by SCAN_STACK_BINDING_LIST_INIT. */
        scanner_binding_item_t *item_p = scanner_context_p->active_binding_list_p->items_p;
        while (item_p != NULL)
        {
          item_p->literal_p->type &= (uint8_t) ~SCANNER_LITERAL_IS_USED;
          item_p = item_p->next_p;
        }
        parser_stack_push_uint8 (context_p, SCAN_STACK_BINDING_LIST_INIT);
      }
      return SCAN_NEXT_TOKEN;
    }
#else /* !ENABLED (JERRY_ES2015) */
    case SCAN_STACK_ARRAY_LITERAL:
#endif /* ENABLED (JERRY_ES2015) */
    case SCAN_STACK_PROPERTY_ACCESSOR:
    {
      if (type != LEXER_RIGHT_SQUARE)
      {
        break;
      }
      scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
      parser_stack_pop_uint8 (context_p);
      return SCAN_NEXT_TOKEN;
    }
#if !ENABLED (JERRY_ES2015)
    case SCAN_STACK_OBJECT_LITERAL:
    {
      if (type != LEXER_RIGHT_BRACE)
      {
        break;
      }
      scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
      parser_stack_pop_uint8 (context_p);
      return SCAN_NEXT_TOKEN;
    }
#endif /* !ENABLED (JERRY_ES2015) */
#if ENABLED (JERRY_ES2015)
    case SCAN_STACK_COMPUTED_PROPERTY:
    {
      if (type != LEXER_RIGHT_SQUARE)
      {
        break;
      }
      lexer_next_token (context_p);
      parser_stack_pop_uint8 (context_p);
      stack_top = (scan_stack_modes_t) context_p->stack_top_uint8;
      if (stack_top == SCAN_STACK_FUNCTION_PROPERTY)
      {
        /* Computed name of a getter/setter/method: scan the function. */
        scanner_push_literal_pool (context_p, scanner_context_p, SCANNER_LITERAL_POOL_FUNCTION);
        scanner_context_p->mode = SCAN_MODE_FUNCTION_ARGUMENTS;
        return SCAN_KEEP_TOKEN;
      }
      JERRY_ASSERT (stack_top == SCAN_STACK_OBJECT_LITERAL);
      if (context_p->token.type == LEXER_LEFT_PAREN)
      {
        /* Shorthand method with a computed name. */
        scanner_push_literal_pool (context_p, scanner_context_p, SCANNER_LITERAL_POOL_FUNCTION);
        parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_PROPERTY);
        scanner_context_p->mode = SCAN_MODE_FUNCTION_ARGUMENTS;
        return SCAN_KEEP_TOKEN;
      }
      if (context_p->token.type != LEXER_COLON)
      {
        scanner_raise_error (context_p);
      }
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
      if (scanner_context_p->binding_type != SCANNER_BINDING_NONE)
      {
        scanner_context_p->mode = SCAN_MODE_BINDING;
      }
      return SCAN_NEXT_TOKEN;
    }
    case SCAN_STACK_COMPUTED_GENERATOR:
    case SCAN_STACK_COMPUTED_ASYNC:
    case SCAN_STACK_COMPUTED_ASYNC_GENERATOR:
    {
      if (type != LEXER_RIGHT_SQUARE)
      {
        break;
      }
      lexer_next_token (context_p);
      parser_stack_pop_uint8 (context_p);
      JERRY_ASSERT (context_p->stack_top_uint8 == SCAN_STACK_OBJECT_LITERAL
                    || context_p->stack_top_uint8 == SCAN_STACK_FUNCTION_PROPERTY);
      /* Translate the computed stack mode into literal pool flags. */
      uint16_t status_flags = (uint16_t) (SCANNER_LITERAL_POOL_FUNCTION
                                          | SCANNER_LITERAL_POOL_GENERATOR
                                          | SCANNER_FROM_COMPUTED_TO_LITERAL_POOL (stack_top));
      scanner_push_literal_pool (context_p, scanner_context_p, status_flags);
      scanner_context_p->mode = SCAN_MODE_FUNCTION_ARGUMENTS;
      return SCAN_KEEP_TOKEN;
    }
    case SCAN_STACK_TEMPLATE_STRING:
    case SCAN_STACK_TAGGED_TEMPLATE_LITERAL:
    {
      if (type != LEXER_RIGHT_BRACE)
      {
        break;
      }
      /* Re-read the '}' as part of the template string continuation. */
      context_p->source_p--;
      context_p->column--;
      lexer_parse_string (context_p, LEXER_STRING_NO_OPTS);
      if (context_p->source_p[-1] != LIT_CHAR_GRAVE_ACCENT)
      {
        /* Another substitution follows: scan its expression. */
        scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
      }
      else
      {
        parser_stack_pop_uint8 (context_p);
        scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
      }
      return SCAN_NEXT_TOKEN;
    }
    case SCAN_STACK_ARROW_ARGUMENTS:
    {
      if (type != LEXER_RIGHT_PAREN)
      {
        break;
      }
      /* Decide whether '(...)' was an arrow parameter list or a
       * parenthesized expression. */
      scanner_check_arrow (context_p, scanner_context_p);
      return SCAN_KEEP_TOKEN;
    }
    case SCAN_STACK_ARROW_EXPRESSION:
    {
      /* End of a concise arrow body: close its literal pool and restore
       * the enclosing await/yield context. */
      scanner_pop_literal_pool (context_p, scanner_context_p);
      parser_stack_pop_uint8 (context_p);
      lexer_update_await_yield (context_p, context_p->status_flags);
      scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION_END;
      return SCAN_KEEP_TOKEN;
    }
    case SCAN_STACK_CLASS_EXTENDS:
    {
      if (type != LEXER_LEFT_BRACE)
      {
        break;
      }
      scanner_context_p->mode = SCAN_MODE_CLASS_METHOD;
      parser_stack_pop_uint8 (context_p);
      return SCAN_KEEP_TOKEN;
    }
    case SCAN_STACK_FUNCTION_PARAMETERS:
    {
      parser_stack_pop_uint8 (context_p);
      /* EOS is only acceptable for the implicit script-level function. */
      if (type != LEXER_RIGHT_PAREN
          && (type != LEXER_EOS || context_p->stack_top_uint8 != SCAN_STACK_SCRIPT_FUNCTION))
      {
        break;
      }
      scanner_context_p->mode = SCAN_MODE_CONTINUE_FUNCTION_ARGUMENTS;
      return SCAN_KEEP_TOKEN;
    }
#endif /* ENABLED (JERRY_ES2015) */
    default:
    {
      scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
      return SCAN_KEEP_TOKEN;
    }
  }
  /* Any token that broke out of the switch is invalid here. */
  scanner_raise_error (context_p);
  return SCAN_NEXT_TOKEN;
} /* scanner_scan_primary_expression_end */
/**
* Scan statements.
*
* @return SCAN_NEXT_TOKEN to read the next token, or SCAN_KEEP_TOKEN to do nothing
*/
static scan_return_types_t
scanner_scan_statement (parser_context_t *context_p, /**< context */
scanner_context_t *scanner_context_p, /**< scanner context */
lexer_token_type_t type, /**< current token type */
scan_stack_modes_t stack_top) /**< current stack top */
{
switch (type)
{
case LEXER_SEMICOLON:
{
scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
return SCAN_KEEP_TOKEN;
}
case LEXER_LEFT_BRACE:
{
#if ENABLED (JERRY_ES2015)
scanner_literal_pool_t *literal_pool_p;
literal_pool_p = scanner_push_literal_pool (context_p,
scanner_context_p,
SCANNER_LITERAL_POOL_BLOCK);
literal_pool_p->source_p = context_p->source_p;
#endif /* ENABLED (JERRY_ES2015) */
scanner_context_p->mode = SCAN_MODE_STATEMENT_OR_TERMINATOR;
parser_stack_push_uint8 (context_p, SCAN_STACK_BLOCK_STATEMENT);
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_DO:
{
scanner_context_p->mode = SCAN_MODE_STATEMENT;
parser_stack_push_uint8 (context_p, SCAN_STACK_DO_STATEMENT);
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_TRY:
{
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LEFT_BRACE)
{
scanner_raise_error (context_p);
}
#if ENABLED (JERRY_ES2015)
scanner_literal_pool_t *literal_pool_p;
literal_pool_p = scanner_push_literal_pool (context_p,
scanner_context_p,
SCANNER_LITERAL_POOL_BLOCK);
literal_pool_p->source_p = context_p->source_p;
#endif /* ENABLED (JERRY_ES2015) */
scanner_context_p->mode = SCAN_MODE_STATEMENT_OR_TERMINATOR;
parser_stack_push_uint8 (context_p, SCAN_STACK_TRY_STATEMENT);
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_DEBUGGER:
{
scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_IF:
case LEXER_KEYW_WITH:
case LEXER_KEYW_SWITCH:
{
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LEFT_PAREN)
{
scanner_raise_error (context_p);
}
uint8_t mode = SCAN_STACK_STATEMENT_WITH_EXPR;
if (type == LEXER_KEYW_IF)
{
parser_stack_push_uint8 (context_p, SCAN_STACK_IF_STATEMENT);
}
else if (type == LEXER_KEYW_WITH)
{
mode = SCAN_STACK_WITH_EXPRESSION;
}
else if (type == LEXER_KEYW_SWITCH)
{
mode = SCAN_STACK_SWITCH_EXPRESSION;
}
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
parser_stack_push_uint8 (context_p, mode);
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_WHILE:
{
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LEFT_PAREN)
{
scanner_raise_error (context_p);
}
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
scanner_source_start_t source_start;
source_start.source_p = context_p->source_p;
parser_stack_push (context_p, &source_start, sizeof (scanner_source_start_t));
parser_stack_push_uint8 (context_p, SCAN_STACK_WHILE_EXPRESSION);
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_FOR:
{
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LEFT_PAREN)
{
scanner_raise_error (context_p);
}
scanner_for_statement_t for_statement;
for_statement.u.source_p = context_p->source_p;
uint8_t stack_mode = SCAN_STACK_FOR_START;
scan_return_types_t return_type = SCAN_KEEP_TOKEN;
lexer_next_token (context_p);
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
#if ENABLED (JERRY_ES2015)
const uint8_t *source_p = context_p->source_p;
#endif /* ENABLED (JERRY_ES2015) */
switch (context_p->token.type)
{
case LEXER_SEMICOLON:
{
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION_END;
break;
}
case LEXER_KEYW_VAR:
{
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
stack_mode = SCAN_STACK_FOR_VAR_START;
return_type = SCAN_NEXT_TOKEN;
break;
}
#if ENABLED (JERRY_ES2015)
case LEXER_LITERAL:
{
if (!lexer_token_is_let (context_p))
{
break;
}
parser_line_counter_t line = context_p->line;
parser_line_counter_t column = context_p->column;
if (lexer_check_arrow (context_p))
{
context_p->source_p = source_p;
context_p->line = line;
context_p->column = column;
context_p->token.flags &= (uint8_t) ~LEXER_NO_SKIP_SPACES;
break;
}
lexer_next_token (context_p);
type = (lexer_token_type_t) context_p->token.type;
if (type != LEXER_LEFT_SQUARE
&& type != LEXER_LEFT_BRACE
&& (type != LEXER_LITERAL || context_p->token.lit_location.type != LEXER_IDENT_LITERAL))
{
scanner_info_t *info_p = scanner_insert_info (context_p, source_p, sizeof (scanner_info_t));
info_p->type = SCANNER_TYPE_LET_EXPRESSION;
scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
break;
}
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
/* FALLTHRU */
}
case LEXER_KEYW_LET:
case LEXER_KEYW_CONST:
{
scanner_literal_pool_t *literal_pool_p;
literal_pool_p = scanner_push_literal_pool (context_p, scanner_context_p, SCANNER_LITERAL_POOL_BLOCK);
literal_pool_p->source_p = source_p;
if (scanner_context_p->mode == SCAN_MODE_PRIMARY_EXPRESSION)
{
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
return_type = SCAN_NEXT_TOKEN;
}
stack_mode = ((context_p->token.type == LEXER_KEYW_CONST) ? SCAN_STACK_FOR_CONST_START
: SCAN_STACK_FOR_LET_START);
break;
}
#endif /* ENABLED (JERRY_ES2015) */
}
parser_stack_push (context_p, &for_statement, sizeof (scanner_for_statement_t));
parser_stack_push_uint8 (context_p, stack_mode);
return return_type;
}
case LEXER_KEYW_VAR:
{
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
parser_stack_push_uint8 (context_p, SCAN_STACK_VAR);
return SCAN_NEXT_TOKEN;
}
#if ENABLED (JERRY_ES2015)
case LEXER_KEYW_LET:
{
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
parser_stack_push_uint8 (context_p, SCAN_STACK_LET);
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_CONST:
{
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
parser_stack_push_uint8 (context_p, SCAN_STACK_CONST);
return SCAN_NEXT_TOKEN;
}
#endif /* ENABLED (JERRY_ES2015) */
case LEXER_KEYW_THROW:
{
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_RETURN:
{
lexer_next_token (context_p);
if (!(context_p->token.flags & LEXER_WAS_NEWLINE)
&& context_p->token.type != LEXER_SEMICOLON
&& context_p->token.type != LEXER_EOS
&& context_p->token.type != LEXER_RIGHT_BRACE)
{
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
return SCAN_KEEP_TOKEN;
}
scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
return SCAN_KEEP_TOKEN;
}
case LEXER_KEYW_BREAK:
case LEXER_KEYW_CONTINUE:
{
lexer_next_token (context_p);
scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
if (!(context_p->token.flags & LEXER_WAS_NEWLINE)
&& context_p->token.type == LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
return SCAN_NEXT_TOKEN;
}
return SCAN_KEEP_TOKEN;
}
case LEXER_KEYW_CASE:
case LEXER_KEYW_DEFAULT:
{
if (stack_top != SCAN_STACK_SWITCH_BLOCK)
{
scanner_raise_error (context_p);
}
scanner_case_info_t *case_info_p;
case_info_p = (scanner_case_info_t *) scanner_malloc (context_p, sizeof (scanner_case_info_t));
*(scanner_context_p->active_switch_statement.last_case_p) = case_info_p;
scanner_context_p->active_switch_statement.last_case_p = &case_info_p->next_p;
case_info_p->next_p = NULL;
scanner_get_location (&case_info_p->location, context_p);
if (type == LEXER_KEYW_DEFAULT)
{
lexer_next_token (context_p);
if (context_p->token.type != LEXER_COLON)
{
scanner_raise_error (context_p);
}
scanner_context_p->mode = SCAN_MODE_STATEMENT_OR_TERMINATOR;
return SCAN_NEXT_TOKEN;
}
scanner_source_start_t source_start;
source_start.source_p = context_p->source_p;
parser_stack_push (context_p, &source_start, sizeof (scanner_source_start_t));
parser_stack_push_uint8 (context_p, SCAN_STACK_CASE_STATEMENT);
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_FUNCTION:
{
#if ENABLED (JERRY_ES2015)
uint16_t status_flags = SCANNER_LITERAL_POOL_FUNCTION | SCANNER_LITERAL_POOL_FUNCTION_STATEMENT;
if (scanner_context_p->async_source_p != NULL)
{
scanner_context_p->status_flags |= SCANNER_CONTEXT_THROW_ERR_ASYNC_FUNCTION;
status_flags |= SCANNER_LITERAL_POOL_ASYNC;
}
#endif /* ENABLED (JERRY_ES2015) */
lexer_next_token (context_p);
#if ENABLED (JERRY_ES2015)
if (context_p->token.type == LEXER_MULTIPLY)
{
status_flags |= SCANNER_LITERAL_POOL_GENERATOR;
lexer_next_token (context_p);
}
#endif /* ENABLED (JERRY_ES2015) */
if (context_p->token.type != LEXER_LITERAL
|| context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, scanner_context_p);
#if ENABLED (JERRY_ES2015)
const uint8_t mask = (SCANNER_LITERAL_IS_ARG | SCANNER_LITERAL_IS_FUNC | SCANNER_LITERAL_IS_LOCAL);
if ((literal_p->type & SCANNER_LITERAL_IS_LOCAL)
&& (literal_p->type & mask) != (SCANNER_LITERAL_IS_ARG | SCANNER_LITERAL_IS_DESTRUCTURED_ARG)
&& (literal_p->type & mask) != (SCANNER_LITERAL_IS_FUNC | SCANNER_LITERAL_IS_FUNC_DECLARATION))
{
scanner_raise_redeclaration_error (context_p);
}
literal_p->type |= SCANNER_LITERAL_IS_FUNC | SCANNER_LITERAL_IS_FUNC_DECLARATION;
scanner_context_p->status_flags &= (uint16_t) ~SCANNER_CONTEXT_THROW_ERR_ASYNC_FUNCTION;
#else
literal_p->type |= SCANNER_LITERAL_IS_VAR | SCANNER_LITERAL_IS_FUNC;
uint16_t status_flags = SCANNER_LITERAL_POOL_FUNCTION;
#endif /* ENABLED (JERRY_ES2015) */
scanner_push_literal_pool (context_p, scanner_context_p, status_flags);
scanner_context_p->mode = SCAN_MODE_FUNCTION_ARGUMENTS;
parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_STATEMENT);
return SCAN_NEXT_TOKEN;
}
#if ENABLED (JERRY_ES2015)
case LEXER_KEYW_CLASS:
{
scanner_push_class_declaration (context_p, scanner_context_p, SCAN_STACK_CLASS_STATEMENT);
if (context_p->token.type != LEXER_LITERAL || context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, scanner_context_p);
scanner_detect_invalid_let (context_p, literal_p);
literal_p->type |= SCANNER_LITERAL_IS_LET;
#if ENABLED (JERRY_ES2015_MODULE_SYSTEM)
if (scanner_context_p->active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_EXPORT)
{
literal_p->type |= SCANNER_LITERAL_NO_REG;
scanner_context_p->active_literal_pool_p->status_flags &= (uint16_t) ~SCANNER_LITERAL_POOL_IN_EXPORT;
}
#endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */
return SCAN_NEXT_TOKEN;
}
#endif /* ENABLED (JERRY_ES2015) */
#if ENABLED (JERRY_ES2015_MODULE_SYSTEM)
case LEXER_KEYW_IMPORT:
{
if (stack_top != SCAN_STACK_SCRIPT)
{
scanner_raise_error (context_p);
}
context_p->global_status_flags |= ECMA_PARSE_MODULE;
scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
lexer_next_token (context_p);
if (context_p->token.type == LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_STRING_LITERAL)
{
return SCAN_NEXT_TOKEN;
}
bool parse_imports = true;
if (context_p->token.type == LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, scanner_context_p);
#if ENABLED (JERRY_ES2015)
scanner_detect_invalid_let (context_p, literal_p);
literal_p->type |= SCANNER_LITERAL_IS_LOCAL | SCANNER_LITERAL_NO_REG;
#else /* !ENABLED (JERRY_ES2015) */
literal_p->type |= SCANNER_LITERAL_IS_VAR | SCANNER_LITERAL_NO_REG;
#endif /* ENABLED (JERRY_ES2015) */
lexer_next_token (context_p);
if (context_p->token.type == LEXER_COMMA)
{
lexer_next_token (context_p);
}
else
{
parse_imports = false;
}
}
if (parse_imports)
{
if (context_p->token.type == LEXER_MULTIPLY)
{
lexer_next_token (context_p);
if (!lexer_token_is_identifier (context_p, "as", 2))
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, scanner_context_p);
#if ENABLED (JERRY_ES2015)
scanner_detect_invalid_let (context_p, literal_p);
literal_p->type |= SCANNER_LITERAL_IS_LOCAL | SCANNER_LITERAL_NO_REG;
#else /* !ENABLED (JERRY_ES2015) */
literal_p->type |= SCANNER_LITERAL_IS_VAR | SCANNER_LITERAL_NO_REG;
#endif /* ENABLED (JERRY_ES2015) */
lexer_next_token (context_p);
}
else if (context_p->token.type == LEXER_LEFT_BRACE)
{
lexer_next_token (context_p);
while (context_p->token.type != LEXER_RIGHT_BRACE)
{
if (context_p->token.type != LEXER_LITERAL
|| context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
#if ENABLED (JERRY_ES2015)
const uint8_t *source_p = context_p->source_p;
#endif /* ENABLED (JERRY_ES2015) */
if (lexer_check_next_character (context_p, LIT_CHAR_LOWERCASE_A))
{
lexer_next_token (context_p);
if (!lexer_token_is_identifier (context_p, "as", 2))
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
#if ENABLED (JERRY_ES2015)
source_p = context_p->source_p;
#endif /* ENABLED (JERRY_ES2015) */
}
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, scanner_context_p);
#if ENABLED (JERRY_ES2015)
if (literal_p->type & (SCANNER_LITERAL_IS_ARG
| SCANNER_LITERAL_IS_VAR
| SCANNER_LITERAL_IS_LOCAL))
{
context_p->source_p = source_p;
scanner_raise_redeclaration_error (context_p);
}
if (literal_p->type & SCANNER_LITERAL_IS_FUNC)
{
literal_p->type &= (uint8_t) ~SCANNER_LITERAL_IS_FUNC;
}
literal_p->type |= SCANNER_LITERAL_IS_LOCAL | SCANNER_LITERAL_NO_REG;
#else /* !ENABLED (JERRY_ES2015) */
literal_p->type |= SCANNER_LITERAL_IS_VAR | SCANNER_LITERAL_NO_REG;
#endif /* ENABLED (JERRY_ES2015) */
lexer_next_token (context_p);
if (context_p->token.type != LEXER_RIGHT_BRACE)
{
if (context_p->token.type != LEXER_COMMA)
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
}
}
lexer_next_token (context_p);
}
else
{
scanner_raise_error (context_p);
}
}
if (!lexer_token_is_identifier (context_p, "from", 4))
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
&& context_p->token.lit_location.type != LEXER_STRING_LITERAL)
{
scanner_raise_error (context_p);
}
return SCAN_NEXT_TOKEN;
}
case LEXER_KEYW_EXPORT:
{
if (stack_top != SCAN_STACK_SCRIPT)
{
scanner_raise_error (context_p);
}
context_p->global_status_flags |= ECMA_PARSE_MODULE;
lexer_next_token (context_p);
if (context_p->token.type == LEXER_KEYW_DEFAULT)
{
lexer_next_token (context_p);
if (context_p->token.type == LEXER_KEYW_FUNCTION)
{
lexer_next_token (context_p);
if (context_p->token.type == LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
lexer_lit_location_t *location_p = scanner_add_literal (context_p, scanner_context_p);
#if ENABLED (JERRY_ES2015)
if (location_p->type & SCANNER_LITERAL_IS_LOCAL
&& !(location_p->type & SCANNER_LITERAL_IS_FUNC))
{
scanner_raise_redeclaration_error (context_p);
}
location_p->type |= SCANNER_LITERAL_IS_FUNC | SCANNER_LITERAL_IS_LET;
#else /* !ENABLED (JERRY_ES2015) */
location_p->type |= SCANNER_LITERAL_IS_VAR | SCANNER_LITERAL_IS_FUNC;
#endif /* ENABLED (JERRY_ES2015) */
lexer_next_token (context_p);
}
else
{
lexer_lit_location_t *location_p;
location_p = scanner_add_custom_literal (context_p,
scanner_context_p->active_literal_pool_p,
&lexer_default_literal);
#if ENABLED (JERRY_ES2015)
location_p->type |= SCANNER_LITERAL_IS_FUNC | SCANNER_LITERAL_IS_LET;
#else /* !ENABLED (JERRY_ES2015) */
location_p->type |= SCANNER_LITERAL_IS_VAR | SCANNER_LITERAL_IS_FUNC;
#endif /* ENABLED (JERRY_ES2015) */
}
scanner_push_literal_pool (context_p, scanner_context_p, SCANNER_LITERAL_POOL_FUNCTION);
parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_STATEMENT);
scanner_context_p->mode = SCAN_MODE_FUNCTION_ARGUMENTS;
return SCAN_KEEP_TOKEN;
}
#if ENABLED (JERRY_ES2015)
if (context_p->token.type == LEXER_KEYW_CLASS)
{
scanner_push_class_declaration (context_p, scanner_context_p, SCAN_STACK_CLASS_STATEMENT);
if (context_p->token.type == LEXER_LITERAL && context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, scanner_context_p);
scanner_detect_invalid_let (context_p, literal_p);
literal_p->type |= SCANNER_LITERAL_IS_LET | SCANNER_LITERAL_NO_REG;
return SCAN_NEXT_TOKEN;
}
lexer_lit_location_t *literal_p;
literal_p = scanner_add_custom_literal (context_p,
scanner_context_p->active_literal_pool_p,
&lexer_default_literal);
literal_p->type |= SCANNER_LITERAL_IS_LET | SCANNER_LITERAL_NO_REG;
return SCAN_KEEP_TOKEN;
}
#endif /* ENABLED (JERRY_ES2015) */
/* Assignment expression. */
lexer_lit_location_t *location_p;
location_p = scanner_add_custom_literal (context_p,
scanner_context_p->active_literal_pool_p,
&lexer_default_literal);
location_p->type |= SCANNER_LITERAL_IS_VAR;
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
if (context_p->token.type != LEXER_LITERAL || context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
return SCAN_KEEP_TOKEN;
}
location_p = scanner_add_literal (context_p, scanner_context_p);
location_p->type |= SCANNER_LITERAL_IS_VAR;
scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
return SCAN_NEXT_TOKEN;
}
scanner_context_p->mode = SCAN_MODE_STATEMENT_END;
if (context_p->token.type == LEXER_MULTIPLY)
{
lexer_next_token (context_p);
if (!lexer_token_is_identifier (context_p, "from", 4))
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_STRING_LITERAL)
{
scanner_raise_error (context_p);
}
return SCAN_NEXT_TOKEN;
}
if (context_p->token.type == LEXER_LEFT_BRACE)
{
lexer_next_token (context_p);
while (context_p->token.type != LEXER_RIGHT_BRACE)
{
if (context_p->token.type != LEXER_LITERAL
|| context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
if (lexer_token_is_identifier (context_p, "as", 2))
{
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
}
if (context_p->token.type != LEXER_RIGHT_BRACE)
{
if (context_p->token.type != LEXER_COMMA)
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
}
}
lexer_next_token (context_p);
if (!lexer_token_is_identifier (context_p, "from", 4))
{
return SCAN_KEEP_TOKEN;
}
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_STRING_LITERAL)
{
scanner_raise_error (context_p);
}
return SCAN_NEXT_TOKEN;
}
switch (context_p->token.type)
{
#if ENABLED (JERRY_ES2015)
case LEXER_KEYW_CLASS:
case LEXER_KEYW_LET:
case LEXER_KEYW_CONST:
#endif /* ENABLED (JERRY_ES2015) */
case LEXER_KEYW_VAR:
{
scanner_context_p->active_literal_pool_p->status_flags |= SCANNER_LITERAL_POOL_IN_EXPORT;
break;
}
}
scanner_context_p->mode = SCAN_MODE_STATEMENT;
return SCAN_KEEP_TOKEN;
}
#endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */
default:
{
break;
}
}
scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
if (type == LEXER_LITERAL
&& context_p->token.lit_location.type == LEXER_IDENT_LITERAL)
{
if (JERRY_UNLIKELY (lexer_check_next_character (context_p, LIT_CHAR_COLON)))
{
lexer_consume_next_character (context_p);
scanner_context_p->mode = SCAN_MODE_STATEMENT;
return SCAN_NEXT_TOKEN;
}
JERRY_ASSERT (context_p->token.flags & LEXER_NO_SKIP_SPACES);
#if ENABLED (JERRY_ES2015)
/* The colon needs to be checked first because the parser also checks
* it first, and this check skips the spaces which affects source_p. */
if (JERRY_UNLIKELY (lexer_check_arrow (context_p)))
{
scanner_scan_simple_arrow (context_p, scanner_context_p, context_p->source_p);
return SCAN_KEEP_TOKEN;
}
if (JERRY_UNLIKELY (lexer_token_is_let (context_p)))
{
lexer_lit_location_t let_literal = context_p->token.lit_location;
const uint8_t *source_p = context_p->source_p;
lexer_next_token (context_p);
type = (lexer_token_type_t) context_p->token.type;
if (type == LEXER_LEFT_SQUARE
|| type == LEXER_LEFT_BRACE
|| (type == LEXER_LITERAL && context_p->token.lit_location.type == LEXER_IDENT_LITERAL))
{
scanner_context_p->mode = SCAN_MODE_VAR_STATEMENT;
parser_stack_push_uint8 (context_p, SCAN_STACK_LET);
return SCAN_KEEP_TOKEN;
}
scanner_info_t *info_p = scanner_insert_info (context_p, source_p, sizeof (scanner_info_t));
info_p->type = SCANNER_TYPE_LET_EXPRESSION;
lexer_lit_location_t *lit_location_p = scanner_add_custom_literal (context_p,
scanner_context_p->active_literal_pool_p,
&let_literal);
lit_location_p->type |= SCANNER_LITERAL_IS_USED;
if (scanner_context_p->active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_WITH)
{
lit_location_p->type |= SCANNER_LITERAL_NO_REG;
}
scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
return SCAN_KEEP_TOKEN;
}
if (JERRY_UNLIKELY (lexer_token_is_async (context_p)))
{
scanner_context_p->async_source_p = context_p->source_p;
if (scanner_check_async_function (context_p, scanner_context_p))
{
scanner_context_p->mode = SCAN_MODE_STATEMENT;
}
return SCAN_KEEP_TOKEN;
}
#endif /* ENABLED (JERRY_ES2015) */
scanner_add_reference (context_p, scanner_context_p);
scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
return SCAN_NEXT_TOKEN;
}
return SCAN_KEEP_TOKEN;
} /* scanner_scan_statement */
/**
 * Scan statement terminator.
 *
 * Pops statement contexts (blocks, if/do/with/try, switch, private blocks)
 * off the parser stack until a context that cannot be closed by the current
 * token is reached. A terminator is either an explicit semicolon, a closing
 * right brace, or an implied newline (automatic semicolon insertion); if none
 * is present when the loop ends, a scanner error is raised.
 *
 * @return SCAN_NEXT_TOKEN to read the next token, or SCAN_KEEP_TOKEN to do nothing
 */
static scan_return_types_t
scanner_scan_statement_end (parser_context_t *context_p, /**< context */
                            scanner_context_t *scanner_context_p, /**< scanner context */
                            lexer_token_type_t type) /**< current token type */
{
  /* True once an explicit ';', a closed '}', or a popped do-while body has
   * been seen; checked at the end against the newline flag (ASI). */
  bool terminator_found = false;

  if (type == LEXER_SEMICOLON)
  {
    lexer_next_token (context_p);
    terminator_found = true;
  }

  while (true)
  {
    /* Re-read the token type: the cases below may consume tokens before
     * continuing the loop. */
    type = (lexer_token_type_t) context_p->token.type;

    switch (context_p->stack_top_uint8)
    {
      case SCAN_STACK_SCRIPT:
      case SCAN_STACK_SCRIPT_FUNCTION:
      {
        /* Top-level context: only end-of-source finishes the scan here. */
        if (type == LEXER_EOS)
        {
          return SCAN_NEXT_TOKEN;
        }
        break;
      }
      case SCAN_STACK_BLOCK_STATEMENT:
#if ENABLED (JERRY_ES2015)
      case SCAN_STACK_CLASS_STATEMENT:
#endif /* ENABLED (JERRY_ES2015) */
      case SCAN_STACK_FUNCTION_STATEMENT:
      {
        if (type != LEXER_RIGHT_BRACE)
        {
          break;
        }

        /* Pop the literal pool that was pushed for this body. In ES2015 mode
         * blocks and function statements have their own pool but class
         * statements do not; in pre-ES2015 mode only function statements do. */
#if ENABLED (JERRY_ES2015)
        if (context_p->stack_top_uint8 != SCAN_STACK_CLASS_STATEMENT)
        {
          scanner_pop_literal_pool (context_p, scanner_context_p);
        }
#else /* !ENABLED (JERRY_ES2015) */
        if (context_p->stack_top_uint8 == SCAN_STACK_FUNCTION_STATEMENT)
        {
          scanner_pop_literal_pool (context_p, scanner_context_p);
        }
#endif /* ENABLED (JERRY_ES2015) */

        /* The '}' itself terminates the statement; keep unwinding. */
        terminator_found = true;
        parser_stack_pop_uint8 (context_p);
        lexer_next_token (context_p);
        continue;
      }
      case SCAN_STACK_FUNCTION_EXPRESSION:
#if ENABLED (JERRY_ES2015)
      case SCAN_STACK_FUNCTION_ARROW:
#endif /* ENABLED (JERRY_ES2015) */
      {
        if (type != LEXER_RIGHT_BRACE)
        {
          break;
        }

        /* A function expression body ended: resume expression scanning.
         * Arrow bodies go straight to expression-end mode instead. */
        scanner_context_p->mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
#if ENABLED (JERRY_ES2015)
        if (context_p->stack_top_uint8 == SCAN_STACK_FUNCTION_ARROW)
        {
          scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION_END;
        }
#endif /* ENABLED (JERRY_ES2015) */
        scanner_pop_literal_pool (context_p, scanner_context_p);
        parser_stack_pop_uint8 (context_p);
        return SCAN_NEXT_TOKEN;
      }
      case SCAN_STACK_FUNCTION_PROPERTY:
      {
        if (type != LEXER_RIGHT_BRACE)
        {
          break;
        }

        scanner_pop_literal_pool (context_p, scanner_context_p);
        parser_stack_pop_uint8 (context_p);

#if ENABLED (JERRY_ES2015)
        /* Method inside a class body: continue with the next class method. */
        if (context_p->stack_top_uint8 == SCAN_STACK_EXPLICIT_CLASS_CONSTRUCTOR
            || context_p->stack_top_uint8 == SCAN_STACK_IMPLICIT_CLASS_CONSTRUCTOR)
        {
          scanner_context_p->mode = SCAN_MODE_CLASS_METHOD;
          return SCAN_KEEP_TOKEN;
        }
#endif /* ENABLED (JERRY_ES2015) */

        /* Otherwise the method belongs to an object literal: after it only
         * '}' (end of literal) or ',' (next property) is allowed. */
        JERRY_ASSERT (context_p->stack_top_uint8 == SCAN_STACK_OBJECT_LITERAL);

        lexer_next_token (context_p);

        if (context_p->token.type == LEXER_RIGHT_BRACE)
        {
          scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION_END;
          return SCAN_KEEP_TOKEN;
        }

        if (context_p->token.type != LEXER_COMMA)
        {
          scanner_raise_error (context_p);
        }

        scanner_context_p->mode = SCAN_MODE_PROPERTY_NAME;
        return SCAN_KEEP_TOKEN;
      }
      case SCAN_STACK_SWITCH_BLOCK:
      {
        if (type != LEXER_RIGHT_BRACE)
        {
          break;
        }

        /* Restore the enclosing switch's case chain that was saved on the
         * parser stack when this switch block was entered. */
        scanner_switch_statement_t switch_statement;

        parser_stack_pop_uint8 (context_p);
        parser_stack_pop (context_p, &switch_statement, sizeof (scanner_switch_statement_t));

        scanner_context_p->active_switch_statement = switch_statement;

#if ENABLED (JERRY_ES2015)
        scanner_pop_literal_pool (context_p, scanner_context_p);
#endif /* ENABLED (JERRY_ES2015) */

        terminator_found = true;
        lexer_next_token (context_p);
        continue;
      }
      case SCAN_STACK_IF_STATEMENT:
      {
        parser_stack_pop_uint8 (context_p);

        /* 'else' may follow only after a proper terminator or a newline. */
        if (type == LEXER_KEYW_ELSE
            && (terminator_found || (context_p->token.flags & LEXER_WAS_NEWLINE)))
        {
#if ENABLED (JERRY_ES2015)
          scanner_check_function_after_if (context_p, scanner_context_p);
          return SCAN_KEEP_TOKEN;
#else /* !ENABLED (JERRY_ES2015) */
          scanner_context_p->mode = SCAN_MODE_STATEMENT;
          return SCAN_NEXT_TOKEN;
#endif /* ENABLED (JERRY_ES2015) */
        }

        continue;
      }
      case SCAN_STACK_WITH_STATEMENT:
      {
        scanner_literal_pool_t *literal_pool_p = scanner_context_p->active_literal_pool_p;

        JERRY_ASSERT (literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_WITH);

        /* Two bytes were pushed for a with statement: the marker, then a flag
         * byte telling whether the enclosing scope was already inside a with.
         * Clear IN_WITH only when the flag byte is zero (outermost with). */
        parser_stack_pop_uint8 (context_p);

        if (context_p->stack_top_uint8 == 0)
        {
          literal_pool_p->status_flags &= (uint16_t) ~SCANNER_LITERAL_POOL_IN_WITH;
        }

        parser_stack_pop_uint8 (context_p);
        continue;
      }
      case SCAN_STACK_DO_STATEMENT:
      {
        parser_stack_pop_uint8 (context_p);

        /* The body of a do statement must be followed by 'while (expr)'. */
        if (type != LEXER_KEYW_WHILE
            || (!terminator_found && !(context_p->token.flags & LEXER_WAS_NEWLINE)))
        {
          scanner_raise_error (context_p);
        }

        lexer_next_token (context_p);
        if (context_p->token.type != LEXER_LEFT_PAREN)
        {
          scanner_raise_error (context_p);
        }

        parser_stack_push_uint8 (context_p, SCAN_STACK_DO_EXPRESSION);
        scanner_context_p->mode = SCAN_MODE_PRIMARY_EXPRESSION;
        return SCAN_NEXT_TOKEN;
      }
      case SCAN_STACK_DO_EXPRESSION:
      {
        /* After do-while's condition the trailing semicolon is optional,
         * so the closing ')' already counts as a terminator. */
        parser_stack_pop_uint8 (context_p);
        terminator_found = true;
        continue;
      }
#if ENABLED (JERRY_ES2015)
      case SCAN_STACK_PRIVATE_BLOCK_EARLY:
      {
        /* Force early creation of all let/const bindings of this block that
         * cannot live in a register, then fall through to pop the block. */
        parser_list_iterator_t literal_iterator;
        lexer_lit_location_t *literal_p;

        parser_list_iterator_init (&scanner_context_p->active_literal_pool_p->literal_pool, &literal_iterator);

        while ((literal_p = (lexer_lit_location_t *) parser_list_iterator_next (&literal_iterator)) != NULL)
        {
          if ((literal_p->type & (SCANNER_LITERAL_IS_LET | SCANNER_LITERAL_IS_CONST))
              && literal_p->type & SCANNER_LITERAL_NO_REG)
          {
            literal_p->type |= SCANNER_LITERAL_EARLY_CREATE;
          }
        }
        /* FALLTHRU */
      }
      case SCAN_STACK_PRIVATE_BLOCK:
      {
        parser_stack_pop_uint8 (context_p);
        scanner_pop_literal_pool (context_p, scanner_context_p);
        continue;
      }
#endif /* ENABLED (JERRY_ES2015) */
      default:
      {
        JERRY_ASSERT (context_p->stack_top_uint8 == SCAN_STACK_TRY_STATEMENT
                      || context_p->stack_top_uint8 == SCAN_STACK_CATCH_STATEMENT);

        if (type != LEXER_RIGHT_BRACE)
        {
          break;
        }

        uint8_t stack_top = context_p->stack_top_uint8;
        parser_stack_pop_uint8 (context_p);
        lexer_next_token (context_p);

        /* In ES2015 mode both try and catch blocks pushed a literal pool;
         * pre-ES2015 only the catch block did (for the catch binding). */
#if ENABLED (JERRY_ES2015)
        scanner_pop_literal_pool (context_p, scanner_context_p);
#else /* !ENABLED (JERRY_ES2015) */
        if (stack_top == SCAN_STACK_CATCH_STATEMENT)
        {
          scanner_pop_literal_pool (context_p, scanner_context_p);
        }
#endif /* ENABLED (JERRY_ES2015) */

        /* A finally statement is optional after a try or catch statement. */
        if (context_p->token.type == LEXER_KEYW_FINALLY)
        {
          lexer_next_token (context_p);

          if (context_p->token.type != LEXER_LEFT_BRACE)
          {
            scanner_raise_error (context_p);
          }

#if ENABLED (JERRY_ES2015)
          scanner_literal_pool_t *literal_pool_p;
          literal_pool_p = scanner_push_literal_pool (context_p,
                                                      scanner_context_p,
                                                      SCANNER_LITERAL_POOL_BLOCK);
          literal_pool_p->source_p = context_p->source_p;
#endif /* ENABLED (JERRY_ES2015) */

          parser_stack_push_uint8 (context_p, SCAN_STACK_BLOCK_STATEMENT);
          scanner_context_p->mode = SCAN_MODE_STATEMENT_OR_TERMINATOR;
          return SCAN_NEXT_TOKEN;
        }

        if (stack_top == SCAN_STACK_CATCH_STATEMENT)
        {
          /* catch without finally: the try/catch statement is complete. */
          terminator_found = true;
          continue;
        }

        /* A catch statement must be present after a try statement unless a finally is provided. */
        if (context_p->token.type != LEXER_KEYW_CATCH)
        {
          scanner_raise_error (context_p);
        }

        lexer_next_token (context_p);

        if (context_p->token.type != LEXER_LEFT_PAREN)
        {
          scanner_raise_error (context_p);
        }

        /* The catch clause opens a new block-scoped literal pool that will
         * hold the catch parameter binding(s). */
        scanner_literal_pool_t *literal_pool_p;
        literal_pool_p = scanner_push_literal_pool (context_p, scanner_context_p, SCANNER_LITERAL_POOL_BLOCK);
        literal_pool_p->source_p = context_p->source_p;

        lexer_next_token (context_p);
        parser_stack_push_uint8 (context_p, SCAN_STACK_CATCH_STATEMENT);

#if ENABLED (JERRY_ES2015)
        /* ES2015 allows a destructuring pattern as the catch parameter. */
        if (context_p->token.type == LEXER_LEFT_SQUARE || context_p->token.type == LEXER_LEFT_BRACE)
        {
          scanner_push_destructuring_pattern (context_p, scanner_context_p, SCANNER_BINDING_CATCH, false);

          if (context_p->token.type == LEXER_LEFT_SQUARE)
          {
            parser_stack_push_uint8 (context_p, SCAN_STACK_ARRAY_LITERAL);
            scanner_context_p->mode = SCAN_MODE_BINDING;
            return SCAN_NEXT_TOKEN;
          }

          parser_stack_push_uint8 (context_p, SCAN_STACK_OBJECT_LITERAL);
          scanner_context_p->mode = SCAN_MODE_PROPERTY_NAME;
          return SCAN_KEEP_TOKEN;
        }
#endif /* ENABLED (JERRY_ES2015) */

        /* Plain identifier catch parameter: register it as a local. */
        if (context_p->token.type != LEXER_LITERAL
            || context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
        {
          scanner_raise_error (context_p);
        }

        lexer_lit_location_t *lit_location_p = scanner_add_literal (context_p, scanner_context_p);
        lit_location_p->type |= SCANNER_LITERAL_IS_LOCAL;

        lexer_next_token (context_p);

        if (context_p->token.type != LEXER_RIGHT_PAREN)
        {
          scanner_raise_error (context_p);
        }

        lexer_next_token (context_p);

        if (context_p->token.type != LEXER_LEFT_BRACE)
        {
          scanner_raise_error (context_p);
        }

        scanner_context_p->mode = SCAN_MODE_STATEMENT_OR_TERMINATOR;
        return SCAN_NEXT_TOKEN;
      }
    }

    /* No context consumed the token: a terminator (explicit or via a newline,
     * i.e. automatic semicolon insertion) must have been seen by now. */
    if (!terminator_found && !(context_p->token.flags & LEXER_WAS_NEWLINE))
    {
      scanner_raise_error (context_p);
    }

    scanner_context_p->mode = SCAN_MODE_STATEMENT;
    return SCAN_KEEP_TOKEN;
  }
} /* scanner_scan_statement_end */
/**
* Scan the whole source code.
*/
void JERRY_ATTR_NOINLINE
scanner_scan_all (parser_context_t *context_p, /**< context */
const uint8_t *arg_list_p, /**< function argument list */
const uint8_t *arg_list_end_p, /**< end of argument list */
const uint8_t *source_p, /**< valid UTF-8 source code */
const uint8_t *source_end_p) /**< end of source code */
{
scanner_context_t scanner_context;
#if ENABLED (JERRY_PARSER_DUMP_BYTE_CODE)
if (context_p->is_show_opcodes)
{
JERRY_DEBUG_MSG ("\n--- Scanning start ---\n\n");
}
#endif /* ENABLED (JERRY_PARSER_DUMP_BYTE_CODE) */
scanner_context.context_status_flags = context_p->status_flags;
scanner_context.status_flags = SCANNER_CONTEXT_NO_FLAGS;
#if ENABLED (JERRY_DEBUGGER)
if (JERRY_CONTEXT (debugger_flags) & JERRY_DEBUGGER_CONNECTED)
{
scanner_context.status_flags |= SCANNER_CONTEXT_DEBUGGER_ENABLED;
}
#endif /* ENABLED (JERRY_DEBUGGER) */
#if ENABLED (JERRY_ES2015)
scanner_context.binding_type = SCANNER_BINDING_NONE;
scanner_context.active_binding_list_p = NULL;
#endif /* ENABLED (JERRY_ES2015) */
scanner_context.active_literal_pool_p = NULL;
scanner_context.active_switch_statement.last_case_p = NULL;
scanner_context.end_arguments_p = NULL;
#if ENABLED (JERRY_ES2015)
scanner_context.async_source_p = NULL;
#endif /* ENABLED (JERRY_ES2015) */
/* This assignment must be here because of Apple compilers. */
context_p->u.scanner_context_p = &scanner_context;
parser_stack_init (context_p);
PARSER_TRY (context_p->try_buffer)
{
context_p->line = 1;
context_p->column = 1;
if (arg_list_p == NULL)
{
context_p->source_p = source_p;
context_p->source_end_p = source_end_p;
uint16_t status_flags = SCANNER_LITERAL_POOL_FUNCTION_WITHOUT_ARGUMENTS | SCANNER_LITERAL_POOL_CAN_EVAL;
if (context_p->status_flags & PARSER_IS_STRICT)
{
status_flags |= SCANNER_LITERAL_POOL_IS_STRICT;
}
scanner_literal_pool_t *literal_pool_p = scanner_push_literal_pool (context_p, &scanner_context, status_flags);
literal_pool_p->source_p = source_p;
parser_stack_push_uint8 (context_p, SCAN_STACK_SCRIPT);
lexer_next_token (context_p);
scanner_check_directives (context_p, &scanner_context);
}
else
{
context_p->source_p = arg_list_p;
context_p->source_end_p = arg_list_end_p;
uint16_t status_flags = SCANNER_LITERAL_POOL_FUNCTION;
if (context_p->status_flags & PARSER_IS_STRICT)
{
status_flags |= SCANNER_LITERAL_POOL_IS_STRICT;
}
#if ENABLED (JERRY_ES2015)
if (context_p->status_flags & PARSER_IS_GENERATOR_FUNCTION)
{
status_flags |= SCANNER_LITERAL_POOL_GENERATOR;
}
#endif /* ENABLED (JERRY_ES2015) */
scanner_push_literal_pool (context_p, &scanner_context, status_flags);
scanner_context.mode = SCAN_MODE_FUNCTION_ARGUMENTS;
parser_stack_push_uint8 (context_p, SCAN_STACK_SCRIPT_FUNCTION);
/* Faking the first token. */
context_p->token.type = LEXER_LEFT_PAREN;
}
while (true)
{
lexer_token_type_t type = (lexer_token_type_t) context_p->token.type;
scan_stack_modes_t stack_top = (scan_stack_modes_t) context_p->stack_top_uint8;
switch (scanner_context.mode)
{
case SCAN_MODE_PRIMARY_EXPRESSION:
{
if (type == LEXER_ADD
|| type == LEXER_SUBTRACT
|| LEXER_IS_UNARY_OP_TOKEN (type))
{
break;
}
/* FALLTHRU */
}
case SCAN_MODE_PRIMARY_EXPRESSION_AFTER_NEW:
{
if (scanner_scan_primary_expression (context_p, &scanner_context, type, stack_top) != SCAN_NEXT_TOKEN)
{
continue;
}
break;
}
#if ENABLED (JERRY_ES2015)
case SCAN_MODE_CLASS_DECLARATION:
{
if (context_p->token.type == LEXER_KEYW_EXTENDS)
{
parser_stack_push_uint8 (context_p, SCAN_STACK_CLASS_EXTENDS);
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION;
break;
}
else if (context_p->token.type != LEXER_LEFT_BRACE)
{
scanner_raise_error (context_p);
}
scanner_context.mode = SCAN_MODE_CLASS_METHOD;
/* FALLTHRU */
}
case SCAN_MODE_CLASS_METHOD:
{
JERRY_ASSERT (stack_top == SCAN_STACK_IMPLICIT_CLASS_CONSTRUCTOR
|| stack_top == SCAN_STACK_EXPLICIT_CLASS_CONSTRUCTOR);
lexer_skip_empty_statements (context_p);
lexer_scan_identifier (context_p);
if (context_p->token.type == LEXER_RIGHT_BRACE)
{
scanner_source_start_t source_start;
parser_stack_pop_uint8 (context_p);
if (stack_top == SCAN_STACK_IMPLICIT_CLASS_CONSTRUCTOR)
{
parser_stack_pop (context_p, &source_start, sizeof (scanner_source_start_t));
}
stack_top = context_p->stack_top_uint8;
JERRY_ASSERT (stack_top == SCAN_STACK_CLASS_STATEMENT || stack_top == SCAN_STACK_CLASS_EXPRESSION);
if (stack_top == SCAN_STACK_CLASS_STATEMENT)
{
/* The token is kept to disallow consuming a semicolon after it. */
scanner_context.mode = SCAN_MODE_STATEMENT_END;
continue;
}
scanner_context.mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
parser_stack_pop_uint8 (context_p);
break;
}
if (context_p->token.type == LEXER_LITERAL
&& LEXER_IS_IDENT_OR_STRING (context_p->token.lit_location.type)
&& lexer_compare_literal_to_string (context_p, "constructor", 11))
{
if (stack_top == SCAN_STACK_IMPLICIT_CLASS_CONSTRUCTOR)
{
scanner_source_start_t source_start;
parser_stack_pop_uint8 (context_p);
parser_stack_pop (context_p, &source_start, sizeof (scanner_source_start_t));
scanner_info_t *info_p = scanner_insert_info (context_p, source_start.source_p, sizeof (scanner_info_t));
info_p->type = SCANNER_TYPE_CLASS_CONSTRUCTOR;
parser_stack_push_uint8 (context_p, SCAN_STACK_EXPLICIT_CLASS_CONSTRUCTOR);
}
}
if (lexer_token_is_identifier (context_p, "static", 6))
{
lexer_scan_identifier (context_p);
}
parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_PROPERTY);
scanner_context.mode = SCAN_MODE_FUNCTION_ARGUMENTS;
uint16_t literal_pool_flags = SCANNER_LITERAL_POOL_FUNCTION;
if (lexer_token_is_identifier (context_p, "get", 3)
|| lexer_token_is_identifier (context_p, "set", 3))
{
lexer_scan_identifier (context_p);
if (context_p->token.type == LEXER_LEFT_PAREN)
{
scanner_push_literal_pool (context_p, &scanner_context, SCANNER_LITERAL_POOL_FUNCTION);
continue;
}
}
else if (lexer_token_is_identifier (context_p, "async", 5))
{
lexer_scan_identifier (context_p);
if (context_p->token.type == LEXER_LEFT_PAREN)
{
scanner_push_literal_pool (context_p, &scanner_context, SCANNER_LITERAL_POOL_FUNCTION);
continue;
}
literal_pool_flags |= SCANNER_LITERAL_POOL_ASYNC;
if (context_p->token.type == LEXER_MULTIPLY)
{
lexer_scan_identifier (context_p);
literal_pool_flags |= SCANNER_LITERAL_POOL_GENERATOR;
}
}
else if (context_p->token.type == LEXER_MULTIPLY)
{
lexer_scan_identifier (context_p);
literal_pool_flags |= SCANNER_LITERAL_POOL_GENERATOR;
}
if (context_p->token.type == LEXER_LEFT_SQUARE)
{
parser_stack_push_uint8 (context_p, SCANNER_FROM_LITERAL_POOL_TO_COMPUTED (literal_pool_flags));
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION;
break;
}
if (context_p->token.type != LEXER_LITERAL)
{
scanner_raise_error (context_p);
}
if (literal_pool_flags & SCANNER_LITERAL_POOL_GENERATOR)
{
context_p->status_flags |= PARSER_IS_GENERATOR_FUNCTION;
}
scanner_push_literal_pool (context_p, &scanner_context, literal_pool_flags);
lexer_next_token (context_p);
continue;
}
#endif /* ENABLED (JERRY_ES2015) */
case SCAN_MODE_POST_PRIMARY_EXPRESSION:
{
if (scanner_scan_post_primary_expression (context_p, &scanner_context, type, stack_top))
{
break;
}
type = (lexer_token_type_t) context_p->token.type;
/* FALLTHRU */
}
case SCAN_MODE_PRIMARY_EXPRESSION_END:
{
if (scanner_scan_primary_expression_end (context_p, &scanner_context, type, stack_top) != SCAN_NEXT_TOKEN)
{
continue;
}
break;
}
case SCAN_MODE_STATEMENT_OR_TERMINATOR:
{
if (type == LEXER_RIGHT_BRACE || type == LEXER_EOS)
{
scanner_context.mode = SCAN_MODE_STATEMENT_END;
continue;
}
/* FALLTHRU */
}
case SCAN_MODE_STATEMENT:
{
if (scanner_scan_statement (context_p, &scanner_context, type, stack_top) != SCAN_NEXT_TOKEN)
{
continue;
}
break;
}
case SCAN_MODE_STATEMENT_END:
{
if (scanner_scan_statement_end (context_p, &scanner_context, type) != SCAN_NEXT_TOKEN)
{
continue;
}
if (context_p->token.type == LEXER_EOS)
{
goto scan_completed;
}
break;
}
case SCAN_MODE_VAR_STATEMENT:
{
#if ENABLED (JERRY_ES2015)
if (type == LEXER_LEFT_SQUARE || type == LEXER_LEFT_BRACE)
{
uint8_t binding_type = SCANNER_BINDING_VAR;
if (stack_top == SCAN_STACK_LET || stack_top == SCAN_STACK_FOR_LET_START)
{
binding_type = SCANNER_BINDING_LET;
}
else if (stack_top == SCAN_STACK_CONST || stack_top == SCAN_STACK_FOR_CONST_START)
{
binding_type = SCANNER_BINDING_CONST;
}
scanner_push_destructuring_pattern (context_p, &scanner_context, binding_type, false);
if (type == LEXER_LEFT_SQUARE)
{
parser_stack_push_uint8 (context_p, SCAN_STACK_ARRAY_LITERAL);
scanner_context.mode = SCAN_MODE_BINDING;
break;
}
parser_stack_push_uint8 (context_p, SCAN_STACK_OBJECT_LITERAL);
scanner_context.mode = SCAN_MODE_PROPERTY_NAME;
continue;
}
#endif /* ENABLED (JERRY_ES2015) */
if (type != LEXER_LITERAL
|| context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, &scanner_context);
#if ENABLED (JERRY_ES2015)
if (stack_top != SCAN_STACK_VAR && stack_top != SCAN_STACK_FOR_VAR_START)
{
scanner_detect_invalid_let (context_p, literal_p);
if (stack_top == SCAN_STACK_LET || stack_top == SCAN_STACK_FOR_LET_START)
{
literal_p->type |= SCANNER_LITERAL_IS_LET;
}
else
{
JERRY_ASSERT (stack_top == SCAN_STACK_CONST || stack_top == SCAN_STACK_FOR_CONST_START);
literal_p->type |= SCANNER_LITERAL_IS_CONST;
}
lexer_next_token (context_p);
if (literal_p->type & SCANNER_LITERAL_IS_USED)
{
literal_p->type |= SCANNER_LITERAL_EARLY_CREATE;
}
else if (context_p->token.type == LEXER_ASSIGN)
{
scanner_binding_literal_t binding_literal;
binding_literal.literal_p = literal_p;
parser_stack_push (context_p, &binding_literal, sizeof (scanner_binding_literal_t));
parser_stack_push_uint8 (context_p, SCAN_STACK_BINDING_INIT);
}
}
else
{
if (!(literal_p->type & SCANNER_LITERAL_IS_VAR))
{
scanner_detect_invalid_var (context_p, &scanner_context, literal_p);
literal_p->type |= SCANNER_LITERAL_IS_VAR;
if (scanner_context.active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_WITH)
{
literal_p->type |= SCANNER_LITERAL_NO_REG;
}
}
lexer_next_token (context_p);
}
#else /* !ENABLED (JERRY_ES2015) */
literal_p->type |= SCANNER_LITERAL_IS_VAR;
if (scanner_context.active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_WITH)
{
literal_p->type |= SCANNER_LITERAL_NO_REG;
}
lexer_next_token (context_p);
#endif /* ENABLED (JERRY_ES2015) */
#if ENABLED (JERRY_ES2015_MODULE_SYSTEM)
if (scanner_context.active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_EXPORT)
{
literal_p->type |= SCANNER_LITERAL_NO_REG;
}
#endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */
switch (context_p->token.type)
{
case LEXER_ASSIGN:
{
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION;
/* FALLTHRU */
}
case LEXER_COMMA:
{
lexer_next_token (context_p);
continue;
}
}
if (SCANNER_IS_FOR_START (stack_top))
{
#if ENABLED (JERRY_ES2015_MODULE_SYSTEM)
JERRY_ASSERT (!(scanner_context.active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_EXPORT));
#endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */
if (context_p->token.type != LEXER_SEMICOLON
&& context_p->token.type != LEXER_KEYW_IN
&& !SCANNER_IDENTIFIER_IS_OF ())
{
scanner_raise_error (context_p);
}
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION_END;
continue;
}
#if ENABLED (JERRY_ES2015)
JERRY_ASSERT (stack_top == SCAN_STACK_VAR || stack_top == SCAN_STACK_LET || stack_top == SCAN_STACK_CONST);
#else /* !ENABLED (JERRY_ES2015) */
JERRY_ASSERT (stack_top == SCAN_STACK_VAR);
#endif /* ENABLED (JERRY_ES2015) */
#if ENABLED (JERRY_ES2015_MODULE_SYSTEM)
scanner_context.active_literal_pool_p->status_flags &= (uint16_t) ~SCANNER_LITERAL_POOL_IN_EXPORT;
#endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */
scanner_context.mode = SCAN_MODE_STATEMENT_END;
parser_stack_pop_uint8 (context_p);
continue;
}
case SCAN_MODE_FUNCTION_ARGUMENTS:
{
JERRY_ASSERT (stack_top == SCAN_STACK_SCRIPT_FUNCTION
|| stack_top == SCAN_STACK_FUNCTION_STATEMENT
|| stack_top == SCAN_STACK_FUNCTION_EXPRESSION
|| stack_top == SCAN_STACK_FUNCTION_PROPERTY);
scanner_literal_pool_t *literal_pool_p = scanner_context.active_literal_pool_p;
JERRY_ASSERT (literal_pool_p != NULL && (literal_pool_p->status_flags & SCANNER_LITERAL_POOL_FUNCTION));
literal_pool_p->source_p = context_p->source_p;
#if ENABLED (JERRY_ES2015)
if (JERRY_UNLIKELY (scanner_context.async_source_p != NULL))
{
literal_pool_p->status_flags |= SCANNER_LITERAL_POOL_ASYNC;
literal_pool_p->source_p = scanner_context.async_source_p;
scanner_context.async_source_p = NULL;
}
#endif /* ENABLED (JERRY_ES2015) */
if (type != LEXER_LEFT_PAREN)
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
#if ENABLED (JERRY_ES2015)
/* FALLTHRU */
}
case SCAN_MODE_CONTINUE_FUNCTION_ARGUMENTS:
{
#endif /* ENABLED (JERRY_ES2015) */
if (context_p->token.type != LEXER_RIGHT_PAREN && context_p->token.type != LEXER_EOS)
{
#if ENABLED (JERRY_ES2015)
lexer_lit_location_t *argument_literal_p;
#endif /* ENABLED (JERRY_ES2015) */
while (true)
{
#if ENABLED (JERRY_ES2015)
if (context_p->token.type == LEXER_THREE_DOTS)
{
scanner_context.active_literal_pool_p->status_flags |= SCANNER_LITERAL_POOL_ARGUMENTS_UNMAPPED;
lexer_next_token (context_p);
}
if (context_p->token.type == LEXER_LEFT_SQUARE || context_p->token.type == LEXER_LEFT_BRACE)
{
argument_literal_p = NULL;
break;
}
#endif /* ENABLED (JERRY_ES2015) */
if (context_p->token.type != LEXER_LITERAL
|| context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
scanner_raise_error (context_p);
}
#if ENABLED (JERRY_ES2015)
argument_literal_p = scanner_append_argument (context_p, &scanner_context);
#else /* !ENABLED (JERRY_ES2015) */
scanner_append_argument (context_p, &scanner_context);
#endif /* ENABLED (JERRY_ES2015) */
lexer_next_token (context_p);
if (context_p->token.type != LEXER_COMMA)
{
break;
}
lexer_next_token (context_p);
}
#if ENABLED (JERRY_ES2015)
if (argument_literal_p == NULL)
{
scanner_context.active_literal_pool_p->status_flags |= SCANNER_LITERAL_POOL_ARGUMENTS_UNMAPPED;
parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_PARAMETERS);
scanner_append_hole (context_p, &scanner_context);
scanner_push_destructuring_pattern (context_p, &scanner_context, SCANNER_BINDING_ARG, false);
if (context_p->token.type == LEXER_LEFT_SQUARE)
{
parser_stack_push_uint8 (context_p, SCAN_STACK_ARRAY_LITERAL);
scanner_context.mode = SCAN_MODE_BINDING;
break;
}
parser_stack_push_uint8 (context_p, SCAN_STACK_OBJECT_LITERAL);
scanner_context.mode = SCAN_MODE_PROPERTY_NAME;
continue;
}
if (context_p->token.type == LEXER_ASSIGN)
{
scanner_context.active_literal_pool_p->status_flags |= SCANNER_LITERAL_POOL_ARGUMENTS_UNMAPPED;
parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_PARAMETERS);
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION;
if (argument_literal_p->type & SCANNER_LITERAL_IS_USED)
{
JERRY_ASSERT (argument_literal_p->type & SCANNER_LITERAL_EARLY_CREATE);
break;
}
scanner_binding_literal_t binding_literal;
binding_literal.literal_p = argument_literal_p;
parser_stack_push (context_p, &binding_literal, sizeof (scanner_binding_literal_t));
parser_stack_push_uint8 (context_p, SCAN_STACK_BINDING_INIT);
break;
}
#endif /* ENABLED (JERRY_ES2015) */
}
if (context_p->token.type == LEXER_EOS && stack_top == SCAN_STACK_SCRIPT_FUNCTION)
{
/* End of argument parsing. */
scanner_info_t *scanner_info_p = (scanner_info_t *) scanner_malloc (context_p, sizeof (scanner_info_t));
scanner_info_p->next_p = context_p->next_scanner_info_p;
scanner_info_p->source_p = NULL;
scanner_info_p->type = SCANNER_TYPE_END_ARGUMENTS;
scanner_context.end_arguments_p = scanner_info_p;
context_p->next_scanner_info_p = scanner_info_p;
context_p->source_p = source_p;
context_p->source_end_p = source_end_p;
context_p->line = 1;
context_p->column = 1;
scanner_filter_arguments (context_p, &scanner_context);
lexer_next_token (context_p);
scanner_check_directives (context_p, &scanner_context);
continue;
}
if (context_p->token.type != LEXER_RIGHT_PAREN)
{
scanner_raise_error (context_p);
}
lexer_next_token (context_p);
if (context_p->token.type != LEXER_LEFT_BRACE)
{
scanner_raise_error (context_p);
}
scanner_filter_arguments (context_p, &scanner_context);
lexer_next_token (context_p);
scanner_check_directives (context_p, &scanner_context);
continue;
}
case SCAN_MODE_PROPERTY_NAME:
{
JERRY_ASSERT (stack_top == SCAN_STACK_OBJECT_LITERAL);
if (lexer_scan_identifier (context_p))
{
lexer_check_property_modifier (context_p);
}
#if ENABLED (JERRY_ES2015)
if (context_p->token.type == LEXER_LEFT_SQUARE)
{
parser_stack_push_uint8 (context_p, SCAN_STACK_COMPUTED_PROPERTY);
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION;
break;
}
#endif /* ENABLED (JERRY_ES2015) */
if (context_p->token.type == LEXER_RIGHT_BRACE)
{
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION_END;
continue;
}
if (context_p->token.type == LEXER_PROPERTY_GETTER
#if ENABLED (JERRY_ES2015)
|| context_p->token.type == LEXER_KEYW_ASYNC
|| context_p->token.type == LEXER_MULTIPLY
#endif /* ENABLED (JERRY_ES2015) */
|| context_p->token.type == LEXER_PROPERTY_SETTER)
{
uint16_t literal_pool_flags = SCANNER_LITERAL_POOL_FUNCTION;
#if ENABLED (JERRY_ES2015)
if (context_p->token.type == LEXER_MULTIPLY)
{
literal_pool_flags |= SCANNER_LITERAL_POOL_GENERATOR;
}
else if (context_p->token.type == LEXER_KEYW_ASYNC)
{
literal_pool_flags |= SCANNER_LITERAL_POOL_ASYNC;
if (lexer_consume_generator (context_p))
{
literal_pool_flags |= SCANNER_LITERAL_POOL_GENERATOR;
}
}
#endif /* ENABLED (JERRY_ES2015) */
parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_PROPERTY);
lexer_scan_identifier (context_p);
#if ENABLED (JERRY_ES2015)
if (context_p->token.type == LEXER_LEFT_SQUARE)
{
parser_stack_push_uint8 (context_p, SCANNER_FROM_LITERAL_POOL_TO_COMPUTED (literal_pool_flags));
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION;
break;
}
#endif /* ENABLED (JERRY_ES2015) */
if (context_p->token.type != LEXER_LITERAL)
{
scanner_raise_error (context_p);
}
scanner_push_literal_pool (context_p, &scanner_context, literal_pool_flags);
scanner_context.mode = SCAN_MODE_FUNCTION_ARGUMENTS;
break;
}
if (context_p->token.type != LEXER_LITERAL)
{
scanner_raise_error (context_p);
}
#if ENABLED (JERRY_ES2015)
parser_line_counter_t start_line = context_p->token.line;
parser_line_counter_t start_column = context_p->token.column;
bool is_ident = (context_p->token.lit_location.type == LEXER_IDENT_LITERAL);
#endif /* ENABLED (JERRY_ES2015) */
lexer_next_token (context_p);
#if ENABLED (JERRY_ES2015)
if (context_p->token.type == LEXER_LEFT_PAREN)
{
scanner_push_literal_pool (context_p, &scanner_context, SCANNER_LITERAL_POOL_FUNCTION);
parser_stack_push_uint8 (context_p, SCAN_STACK_FUNCTION_PROPERTY);
scanner_context.mode = SCAN_MODE_FUNCTION_ARGUMENTS;
continue;
}
if (is_ident
&& (context_p->token.type == LEXER_COMMA
|| context_p->token.type == LEXER_RIGHT_BRACE
|| context_p->token.type == LEXER_ASSIGN))
{
context_p->source_p = context_p->token.lit_location.char_p;
context_p->line = start_line;
context_p->column = start_column;
lexer_next_token (context_p);
JERRY_ASSERT (context_p->token.type != LEXER_LITERAL
|| context_p->token.lit_location.type == LEXER_IDENT_LITERAL);
if (context_p->token.type != LEXER_LITERAL)
{
scanner_raise_error (context_p);
}
if (scanner_context.binding_type != SCANNER_BINDING_NONE)
{
scanner_context.mode = SCAN_MODE_BINDING;
continue;
}
scanner_add_reference (context_p, &scanner_context);
lexer_next_token (context_p);
if (context_p->token.type == LEXER_ASSIGN)
{
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION;
break;
}
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION_END;
continue;
}
#endif /* ENABLED (JERRY_ES2015) */
if (context_p->token.type != LEXER_COLON)
{
scanner_raise_error (context_p);
}
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION;
#if ENABLED (JERRY_ES2015)
if (scanner_context.binding_type != SCANNER_BINDING_NONE)
{
scanner_context.mode = SCAN_MODE_BINDING;
}
#endif /* ENABLED (JERRY_ES2015) */
break;
}
#if ENABLED (JERRY_ES2015)
case SCAN_MODE_BINDING:
{
JERRY_ASSERT (scanner_context.binding_type == SCANNER_BINDING_VAR
|| scanner_context.binding_type == SCANNER_BINDING_LET
|| scanner_context.binding_type == SCANNER_BINDING_CATCH
|| scanner_context.binding_type == SCANNER_BINDING_CONST
|| scanner_context.binding_type == SCANNER_BINDING_ARG
|| scanner_context.binding_type == SCANNER_BINDING_ARROW_ARG);
if (type == LEXER_THREE_DOTS)
{
lexer_next_token (context_p);
type = (lexer_token_type_t) context_p->token.type;
}
if (type == LEXER_LEFT_SQUARE || type == LEXER_LEFT_BRACE)
{
scanner_push_destructuring_pattern (context_p, &scanner_context, scanner_context.binding_type, true);
if (type == LEXER_LEFT_SQUARE)
{
parser_stack_push_uint8 (context_p, SCAN_STACK_ARRAY_LITERAL);
break;
}
parser_stack_push_uint8 (context_p, SCAN_STACK_OBJECT_LITERAL);
scanner_context.mode = SCAN_MODE_PROPERTY_NAME;
continue;
}
if (type != LEXER_LITERAL || context_p->token.lit_location.type != LEXER_IDENT_LITERAL)
{
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION;
continue;
}
lexer_lit_location_t *literal_p = scanner_add_literal (context_p, &scanner_context);
scanner_context.mode = SCAN_MODE_POST_PRIMARY_EXPRESSION;
if (scanner_context.binding_type == SCANNER_BINDING_VAR)
{
if (!(literal_p->type & SCANNER_LITERAL_IS_VAR))
{
scanner_detect_invalid_var (context_p, &scanner_context, literal_p);
literal_p->type |= SCANNER_LITERAL_IS_VAR;
if (scanner_context.active_literal_pool_p->status_flags & SCANNER_LITERAL_POOL_IN_WITH)
{
literal_p->type |= SCANNER_LITERAL_NO_REG;
}
}
break;
}
if (scanner_context.binding_type == SCANNER_BINDING_ARROW_ARG)
{
literal_p->type |= SCANNER_LITERAL_IS_ARG | SCANNER_LITERAL_IS_ARROW_DESTRUCTURED_ARG;
if (literal_p->type & SCANNER_LITERAL_IS_USED)
{
literal_p->type |= SCANNER_LITERAL_EARLY_CREATE;
break;
}
}
else
{
scanner_detect_invalid_let (context_p, literal_p);
if (scanner_context.binding_type <= SCANNER_BINDING_CATCH)
{
JERRY_ASSERT ((scanner_context.binding_type == SCANNER_BINDING_LET)
|| (scanner_context.binding_type == SCANNER_BINDING_CATCH));
literal_p->type |= SCANNER_LITERAL_IS_LET;
}
else
{
literal_p->type |= SCANNER_LITERAL_IS_CONST;
if (scanner_context.binding_type == SCANNER_BINDING_ARG)
{
literal_p->type |= SCANNER_LITERAL_IS_ARG;
if (literal_p->type & SCANNER_LITERAL_IS_USED)
{
literal_p->type |= SCANNER_LITERAL_EARLY_CREATE;
break;
}
}
}
if (literal_p->type & SCANNER_LITERAL_IS_USED)
{
literal_p->type |= SCANNER_LITERAL_EARLY_CREATE;
break;
}
}
scanner_binding_item_t *binding_item_p;
binding_item_p = (scanner_binding_item_t *) scanner_malloc (context_p, sizeof (scanner_binding_item_t));
binding_item_p->next_p = scanner_context.active_binding_list_p->items_p;
binding_item_p->literal_p = literal_p;
scanner_context.active_binding_list_p->items_p = binding_item_p;
lexer_next_token (context_p);
if (context_p->token.type != LEXER_ASSIGN)
{
continue;
}
scanner_binding_literal_t binding_literal;
binding_literal.literal_p = literal_p;
parser_stack_push (context_p, &binding_literal, sizeof (scanner_binding_literal_t));
parser_stack_push_uint8 (context_p, SCAN_STACK_BINDING_INIT);
scanner_context.mode = SCAN_MODE_PRIMARY_EXPRESSION;
break;
}
#endif /* ENABLED (JERRY_ES2015) */
}
lexer_next_token (context_p);
}
scan_completed:
if (context_p->stack_top_uint8 != SCAN_STACK_SCRIPT
&& context_p->stack_top_uint8 != SCAN_STACK_SCRIPT_FUNCTION)
{
scanner_raise_error (context_p);
}
scanner_pop_literal_pool (context_p, &scanner_context);
#if ENABLED (JERRY_ES2015)
JERRY_ASSERT (scanner_context.active_binding_list_p == NULL);
#endif /* ENABLED (JERRY_ES2015) */
JERRY_ASSERT (scanner_context.active_literal_pool_p == NULL);
#ifndef JERRY_NDEBUG
scanner_context.context_status_flags |= PARSER_SCANNING_SUCCESSFUL;
#endif /* !JERRY_NDEBUG */
}
PARSER_CATCH
{
#if ENABLED (JERRY_ES2015)
while (scanner_context.active_binding_list_p != NULL)
{
scanner_pop_binding_list (&scanner_context);
}
#endif /* ENABLED (JERRY_ES2015) */
if (JERRY_UNLIKELY (context_p->error != PARSER_ERR_OUT_OF_MEMORY))
{
/* Ignore the errors thrown by the lexer. */
context_p->error = PARSER_ERR_NO_ERROR;
/* The following code may allocate memory, so it is enclosed in a try/catch. */
PARSER_TRY (context_p->try_buffer)
{
#if ENABLED (JERRY_ES2015)
if (scanner_context.status_flags & SCANNER_CONTEXT_THROW_ERR_ASYNC_FUNCTION)
{
JERRY_ASSERT (scanner_context.async_source_p != NULL);
scanner_info_t *info_p;
info_p = scanner_insert_info (context_p, scanner_context.async_source_p, sizeof (scanner_info_t));
info_p->type = SCANNER_TYPE_ERR_ASYNC_FUNCTION;
}
#endif /* ENABLED (JERRY_ES2015) */
while (scanner_context.active_literal_pool_p != NULL)
{
scanner_pop_literal_pool (context_p, &scanner_context);
}
}
PARSER_CATCH
{
JERRY_ASSERT (context_p->error == PARSER_ERR_OUT_OF_MEMORY);
}
PARSER_TRY_END
}
JERRY_ASSERT (context_p->error == PARSER_ERR_NO_ERROR || context_p->error == PARSER_ERR_OUT_OF_MEMORY);
if (context_p->error == PARSER_ERR_OUT_OF_MEMORY)
{
while (scanner_context.active_literal_pool_p != NULL)
{
scanner_literal_pool_t *literal_pool_p = scanner_context.active_literal_pool_p;
scanner_context.active_literal_pool_p = literal_pool_p->prev_p;
parser_list_free (&literal_pool_p->literal_pool);
scanner_free (literal_pool_p, sizeof (scanner_literal_pool_t));
}
parser_stack_free (context_p);
return;
}
}
PARSER_TRY_END
context_p->status_flags = scanner_context.context_status_flags;
scanner_reverse_info_list (context_p);
#if ENABLED (JERRY_PARSER_DUMP_BYTE_CODE)
if (context_p->is_show_opcodes)
{
scanner_info_t *info_p = context_p->next_scanner_info_p;
const uint8_t *source_start_p = (arg_list_p == NULL) ? source_p : arg_list_p;
while (info_p->type != SCANNER_TYPE_END)
{
const char *name_p = NULL;
bool print_location = false;
switch (info_p->type)
{
case SCANNER_TYPE_END_ARGUMENTS:
{
JERRY_DEBUG_MSG (" END_ARGUMENTS\n");
source_start_p = source_p;
break;
}
case SCANNER_TYPE_FUNCTION:
case SCANNER_TYPE_BLOCK:
{
const uint8_t *prev_source_p = info_p->source_p - 1;
const uint8_t *data_p;
if (info_p->type == SCANNER_TYPE_FUNCTION)
{
data_p = (const uint8_t *) (info_p + 1);
JERRY_DEBUG_MSG (" FUNCTION: flags: 0x%x declarations: %d",
(int) info_p->u8_arg,
(int) info_p->u16_arg);
}
else
{
data_p = (const uint8_t *) (info_p + 1);
JERRY_DEBUG_MSG (" BLOCK:");
}
JERRY_DEBUG_MSG (" source:%d\n", (int) (info_p->source_p - source_start_p));
while (data_p[0] != SCANNER_STREAM_TYPE_END)
{
switch (data_p[0] & SCANNER_STREAM_TYPE_MASK)
{
case SCANNER_STREAM_TYPE_VAR:
{
JERRY_DEBUG_MSG (" VAR ");
break;
}
#if ENABLED (JERRY_ES2015)
case SCANNER_STREAM_TYPE_LET:
{
JERRY_DEBUG_MSG (" LET ");
break;
}
case SCANNER_STREAM_TYPE_CONST:
{
JERRY_DEBUG_MSG (" CONST ");
break;
}
case SCANNER_STREAM_TYPE_LOCAL:
{
JERRY_DEBUG_MSG (" LOCAL ");
break;
}
#endif /* ENABLED (JERRY_ES2015) */
#if ENABLED (JERRY_ES2015_MODULE_SYSTEM)
case SCANNER_STREAM_TYPE_IMPORT:
{
JERRY_DEBUG_MSG (" IMPORT ");
break;
}
#endif /* ENABLED (JERRY_ES2015_MODULE_SYSTEM) */
case SCANNER_STREAM_TYPE_ARG:
{
JERRY_DEBUG_MSG (" ARG ");
break;
}
#if ENABLED (JERRY_ES2015)
case SCANNER_STREAM_TYPE_DESTRUCTURED_ARG:
{
JERRY_DEBUG_MSG (" DESTRUCTURED_ARG ");
break;
}
#endif /* ENABLED (JERRY_ES2015) */
case SCANNER_STREAM_TYPE_ARG_FUNC:
{
JERRY_DEBUG_MSG (" ARG_FUNC ");
break;
}
#if ENABLED (JERRY_ES2015)
case SCANNER_STREAM_TYPE_DESTRUCTURED_ARG_FUNC:
{
JERRY_DEBUG_MSG (" DESTRUCTURED_ARG_FUNC ");
break;
}
#endif /* ENABLED (JERRY_ES2015) */
case SCANNER_STREAM_TYPE_FUNC:
{
JERRY_DEBUG_MSG (" FUNC ");
break;
}
default:
{
JERRY_ASSERT ((data_p[0] & SCANNER_STREAM_TYPE_MASK) == SCANNER_STREAM_TYPE_HOLE);
JERRY_DEBUG_MSG (" HOLE\n");
data_p++;
continue;
}
}
size_t length;
if (!(data_p[0] & SCANNER_STREAM_UINT16_DIFF))
{
if (data_p[2] != 0)
{
prev_source_p += data_p[2];
length = 2 + 1;
}
else
{
memcpy (&prev_source_p, data_p + 2 + 1, sizeof (const uint8_t *));
length = 2 + 1 + sizeof (const uint8_t *);
}
}
else
{
int32_t diff = ((int32_t) data_p[2]) | ((int32_t) data_p[3]) << 8;
if (diff <= UINT8_MAX)
{
diff = -diff;
}
prev_source_p += diff;
length = 2 + 2;
}
#if ENABLED (JERRY_ES2015)
if (data_p[0] & SCANNER_STREAM_EARLY_CREATE)
{
JERRY_ASSERT (data_p[0] & SCANNER_STREAM_NO_REG);
JERRY_DEBUG_MSG ("*");
}
#endif /* ENABLED (JERRY_ES2015) */
if (data_p[0] & SCANNER_STREAM_NO_REG)
{
JERRY_DEBUG_MSG ("* ");
}
JERRY_DEBUG_MSG ("'%.*s'\n", data_p[1], (char *) prev_source_p);
prev_source_p += data_p[1];
data_p += length;
}
break;
}
case SCANNER_TYPE_WHILE:
{
name_p = "WHILE";
print_location = true;
break;
}
case SCANNER_TYPE_FOR:
{
scanner_for_info_t *for_info_p = (scanner_for_info_t *) info_p;
JERRY_DEBUG_MSG (" FOR: source:%d expression:%d[%d:%d] end:%d[%d:%d]\n",
(int) (for_info_p->info.source_p - source_start_p),
(int) (for_info_p->expression_location.source_p - source_start_p),
(int) for_info_p->expression_location.line,
(int) for_info_p->expression_location.column,
(int) (for_info_p->end_location.source_p - source_start_p),
(int) for_info_p->end_location.line,
(int) for_info_p->end_location.column);
break;
}
case SCANNER_TYPE_FOR_IN:
{
name_p = "FOR-IN";
print_location = true;
break;
}
#if ENABLED (JERRY_ES2015)
case SCANNER_TYPE_FOR_OF:
{
name_p = "FOR-OF";
print_location = true;
break;
}
#endif /* ENABLED (JERRY_ES2015) */
case SCANNER_TYPE_SWITCH:
{
JERRY_DEBUG_MSG (" SWITCH: source:%d\n",
(int) (info_p->source_p - source_start_p));
scanner_case_info_t *current_case_p = ((scanner_switch_info_t *) info_p)->case_p;
while (current_case_p != NULL)
{
JERRY_DEBUG_MSG (" CASE: location:%d[%d:%d]\n",
(int) (current_case_p->location.source_p - source_start_p),
(int) current_case_p->location.line,
(int) current_case_p->location.column);
current_case_p = current_case_p->next_p;
}
break;
}
case SCANNER_TYPE_CASE:
{
name_p = "CASE";
print_location = true;
break;
}
#if ENABLED (JERRY_ES2015)
case SCANNER_TYPE_INITIALIZER:
{
name_p = "INITIALIZER";
print_location = true;
break;
}
case SCANNER_TYPE_CLASS_CONSTRUCTOR:
{
JERRY_DEBUG_MSG (" CLASS-CONSTRUCTOR: source:%d\n",
(int) (info_p->source_p - source_start_p));
print_location = false;
break;
}
case SCANNER_TYPE_LET_EXPRESSION:
{
JERRY_DEBUG_MSG (" LET_EXPRESSION: source:%d\n",
(int) (info_p->source_p - source_start_p));
break;
}
case SCANNER_TYPE_ERR_REDECLARED:
{
JERRY_DEBUG_MSG (" ERR_REDECLARED: source:%d\n",
(int) (info_p->source_p - source_start_p));
break;
}
case SCANNER_TYPE_ERR_ASYNC_FUNCTION:
{
JERRY_DEBUG_MSG (" ERR_ASYNC_FUNCTION: source:%d\n",
(int) (info_p->source_p - source_start_p));
break;
}
#endif /* ENABLED (JERRY_ES2015) */
}
if (print_location)
{
scanner_location_info_t *location_info_p = (scanner_location_info_t *) info_p;
JERRY_DEBUG_MSG (" %s: source:%d location:%d[%d:%d]\n",
name_p,
(int) (location_info_p->info.source_p - source_start_p),
(int) (location_info_p->location.source_p - source_start_p),
(int) location_info_p->location.line,
(int) location_info_p->location.column);
}
info_p = info_p->next_p;
}
JERRY_DEBUG_MSG ("\n--- Scanning end ---\n\n");
}
#endif /* ENABLED (JERRY_PARSER_DUMP_BYTE_CODE) */
parser_stack_free (context_p);
} /* scanner_scan_all */
/**
* @}
* @}
* @}
*/
#endif /* ENABLED (JERRY_PARSER) */
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_4032_0 |
crossvul-cpp_data_good_3060_10 | /* Large capacity key type
*
* Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/shmem_fs.h>
#include <linux/err.h>
#include <keys/user-type.h>
#include <keys/big_key-type.h>
MODULE_LICENSE("GPL");
/*
* If the data is under this limit, there's no point creating a shm file to
* hold it as the permanently resident metadata for the shmem fs will be at
* least as large as the data.
*/
#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))
/*
 * big_key defined keys take an arbitrary string as the description and an
 * arbitrary blob of data as the payload
 *
 * Operations table registered with the keyring core; each hook below is
 * implemented in this file except instantiate, which uses the generic
 * helper (preparse did all the work already).
 */
struct key_type key_type_big_key = {
	.name			= "big_key",
	.preparse		= big_key_preparse,	/* validate + stage the payload */
	.free_preparse		= big_key_free_preparse, /* undo preparse on abort */
	.instantiate		= generic_key_instantiate,
	.revoke			= big_key_revoke,	/* truncate backing file */
	.destroy		= big_key_destroy,	/* free buffer / drop path */
	.describe		= big_key_describe,
	.read			= big_key_read,
};
/*
 * Preparse a big key.
 *
 * Validates the payload (must be 1 byte .. 1 MiB) and stages it:
 *  - payloads larger than BIG_KEY_FILE_THRESHOLD are written to a
 *    shmem file so the data can be swapped out; the file's path is
 *    pinned into prep->payload for later reopening;
 *  - smaller payloads are copied into a kmalloc'd buffer stored in
 *    prep->payload[0].
 * The raw length is stashed in prep->type_data[1] so describe/read
 * can recover it from key->type_data.x[1] later.
 *
 * Everything staged here is released by big_key_free_preparse() if
 * instantiation is aborted.
 *
 * Returns 0 on success or a negative errno.
 */
int big_key_preparse(struct key_preparsed_payload *prep)
{
	struct path *path = (struct path *)&prep->payload;
	struct file *file;
	ssize_t written;
	size_t datalen = prep->datalen;
	int ret;

	/*
	 * datalen is unsigned, so the old "datalen <= 0" test could only
	 * ever mean "datalen == 0"; say what we mean.
	 */
	ret = -EINVAL;
	if (datalen == 0 || datalen > 1024 * 1024 || !prep->data)
		goto error;

	/* Set an arbitrary quota */
	prep->quotalen = 16;

	/* Remember the payload length for describe/read. */
	prep->type_data[1] = (void *)(unsigned long)datalen;

	if (datalen > BIG_KEY_FILE_THRESHOLD) {
		/* Create a shmem file to store the data in.  This will permit the data
		 * to be swapped out if needed.
		 *
		 * TODO: Encrypt the stored data with a temporary key.
		 */
		file = shmem_kernel_file_setup("", datalen, 0);
		if (IS_ERR(file)) {
			ret = PTR_ERR(file);
			goto error;
		}

		written = kernel_write(file, prep->data, prep->datalen, 0);
		if (written != datalen) {
			/* Short write: propagate the error, or -ENOMEM if
			 * kernel_write() reported partial progress.
			 */
			ret = written;
			if (written >= 0)
				ret = -ENOMEM;
			goto err_fput;
		}

		/* Pin the mount and dentry to the key so that we can open it again
		 * later
		 */
		*path = file->f_path;
		path_get(path);
		fput(file);
	} else {
		/* Just store the data in a buffer */
		void *data = kmalloc(datalen, GFP_KERNEL);

		/* Use the common error path rather than a bare return so the
		 * function has a single exit convention.
		 */
		ret = -ENOMEM;
		if (!data)
			goto error;

		prep->payload[0] = memcpy(data, prep->data, prep->datalen);
	}
	return 0;

err_fput:
	fput(file);
error:
	return ret;
}
/*
 * Discard a staged payload when instantiation is aborted: drop the
 * pinned shmem path for file-backed keys, or free the inline buffer
 * for small ones.  The size test mirrors big_key_preparse().
 */
void big_key_free_preparse(struct key_preparsed_payload *prep)
{
	if (prep->datalen <= BIG_KEY_FILE_THRESHOLD) {
		/* Small key: the payload is a kmalloc'd copy. */
		kfree(prep->payload[0]);
		return;
	}

	/* Large key: release the pinned mount/dentry pair. */
	path_put((struct path *)&prep->payload);
}
/*
 * dispose of the links from a revoked keyring
 * - called with the key sem write-locked
 *
 * File-backed keys (length in type_data.x[1] above the threshold) have
 * their backing shmem file truncated to zero so the now-unreadable data
 * stops consuming pages; the pinned path itself is dropped later by
 * big_key_destroy().
 */
void big_key_revoke(struct key *key)
{
	struct path *path = (struct path *)&key->payload.data2;

	/* clear the quota */
	key_payload_reserve(key, 0);

	/* Only instantiated, file-backed keys have anything to truncate. */
	if (key_is_instantiated(key) && key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD)
		vfs_truncate(path, 0);
}
/*
 * dispose of the data dangling from the corpse of a big_key key
 *
 * Frees whichever representation big_key_preparse() created (inline
 * buffer vs. pinned file path) and poisons the stored pointers so a
 * stray reuse is detectable.
 */
void big_key_destroy(struct key *key)
{
	struct path *backing;

	if (key->type_data.x[1] <= BIG_KEY_FILE_THRESHOLD) {
		/* Inline buffer: release it and clear the pointer. */
		kfree(key->payload.data);
		key->payload.data = NULL;
		return;
	}

	/* File-backed: unpin the mount/dentry and clear the stale path. */
	backing = (struct path *)&key->payload.data2;
	path_put(backing);
	backing->mnt = NULL;
	backing->dentry = NULL;
}
/*
 * describe the big_key key
 *
 * Emits the description, and for instantiated keys the payload length
 * plus whether it lives in a file or an in-core buffer.
 */
void big_key_describe(const struct key *key, struct seq_file *m)
{
	unsigned long datalen = key->type_data.x[1];

	seq_puts(m, key->description);

	if (!key_is_instantiated(key))
		return;

	seq_printf(m, ": %lu [%s]",
		   datalen,
		   datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
}
/*
 * read the key data
 * - the key's semaphore is read-locked
 *
 * With no buffer (or one too small) just report the required size.
 * Otherwise copy the payload out: small keys straight from the inline
 * buffer, large keys by reopening the backing shmem file.
 *
 * Returns the number of bytes copied or a negative errno.
 */
long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
{
	unsigned long datalen = key->type_data.x[1];
	struct path *path;
	struct file *file;
	loff_t pos = 0;
	long ret;

	/* Size query, or undersized destination. */
	if (!buffer || buflen < datalen)
		return datalen;

	if (datalen <= BIG_KEY_FILE_THRESHOLD) {
		/* Small key: copy straight out of the inline buffer. */
		if (copy_to_user(buffer, key->payload.data, datalen) != 0)
			return -EFAULT;
		return datalen;
	}

	/* Large key: reopen the pinned backing file and read it back. */
	path = (struct path *)&key->payload.data2;
	file = dentry_open(path, O_RDONLY, current_cred());
	if (IS_ERR(file))
		return PTR_ERR(file);

	ret = vfs_read(file, buffer, datalen, &pos);
	fput(file);
	if (ret >= 0 && ret != datalen)
		ret = -EIO;	/* short read of our own file */
	return ret;
}
/*
 * Module stuff
 */
static int __init big_key_init(void)
{
	/* Register the "big_key" type with the keyring core. */
	int rc = register_key_type(&key_type_big_key);

	return rc;
}
/* Unregister the "big_key" type on module unload. */
static void __exit big_key_cleanup(void)
{
	unregister_key_type(&key_type_big_key);
}
module_init(big_key_init);
module_exit(big_key_cleanup);
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_3060_10 |
crossvul-cpp_data_good_5217_2 | /*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright 2016 Syneto S.R.L. All rights reserved.
*/
/*
* General Structures Layout
* -------------------------
*
* This is a simplified diagram showing the relationship between most of the
* main structures.
*
* +-------------------+
* | SMB_INFO |
* +-------------------+
* |
* |
* v
* +-------------------+ +-------------------+ +-------------------+
* | SESSION |<----->| SESSION |......| SESSION |
* +-------------------+ +-------------------+ +-------------------+
* | |
* | |
* | v
* | +-------------------+ +-------------------+ +-------------------+
* | | USER |<--->| USER |...| USER |
* | +-------------------+ +-------------------+ +-------------------+
* |
* |
* v
* +-------------------+ +-------------------+ +-------------------+
* | TREE |<----->| TREE |......| TREE |
* +-------------------+ +-------------------+ +-------------------+
* | |
* | |
* | v
* | +-------+ +-------+ +-------+
* | | OFILE |<----->| OFILE |......| OFILE |
* | +-------+ +-------+ +-------+
* |
* |
* v
* +-------+ +------+ +------+
* | ODIR |<----->| ODIR |......| ODIR |
* +-------+ +------+ +------+
*
*
* Ofile State Machine
* ------------------
*
* +-------------------------+ T0
* | SMB_OFILE_STATE_OPEN |<----------- Creation/Allocation
* +-------------------------+
* |
* | T1
* |
* v
* +-------------------------+
* | SMB_OFILE_STATE_CLOSING |
* +-------------------------+
* |
* | T2
* |
* v
* +-------------------------+ T3
* | SMB_OFILE_STATE_CLOSED |----------> Deletion/Free
* +-------------------------+
*
* SMB_OFILE_STATE_OPEN
*
* While in this state:
* - The ofile is queued in the list of ofiles of its tree.
* - References will be given out if the ofile is looked up.
*
* SMB_OFILE_STATE_CLOSING
*
* While in this state:
* - The ofile is queued in the list of ofiles of its tree.
* - References will not be given out if the ofile is looked up.
* - The file is closed and the locks held are being released.
* - The resources associated with the ofile remain.
*
* SMB_OFILE_STATE_CLOSED
*
* While in this state:
* - The ofile is queued in the list of ofiles of its tree.
* - References will not be given out if the ofile is looked up.
* - The resources associated with the ofile remain.
*
* Transition T0
*
* This transition occurs in smb_ofile_open(). A new ofile is created and
* added to the list of ofiles of a tree.
*
* Transition T1
*
* This transition occurs in smb_ofile_close().
*
* Transition T2
*
* This transition occurs in smb_ofile_release(). The resources associated
* with the ofile are freed as well as the ofile structure. For the
* transition to occur, the ofile must be in the SMB_OFILE_STATE_CLOSED
* state and the reference count be zero.
*
* Comments
* --------
*
* The state machine of the ofile structures is controlled by 3 elements:
* - The list of ofiles of the tree it belongs to.
* - The mutex embedded in the structure itself.
* - The reference count.
*
* There's a mutex embedded in the ofile structure used to protect its fields
* and there's a lock embedded in the list of ofiles of a tree. To
* increment or to decrement the reference count the mutex must be entered.
* To insert the ofile into the list of ofiles of the tree and to remove
* the ofile from it, the lock must be entered in RW_WRITER mode.
*
* Rules of access to a ofile structure:
*
* 1) In order to avoid deadlocks, when both (mutex and lock of the ofile
* list) have to be entered, the lock must be entered first.
*
* 2) All actions applied to an ofile require a reference count.
*
* 3) There are 2 ways of getting a reference count. One is when the ofile
* is opened. The other one when the ofile is looked up. This translates
* into 2 functions: smb_ofile_open() and smb_ofile_lookup_by_fid().
*
* It should be noted that the reference count of an ofile registers the
* number of references to the ofile in other structures (such as an smb
* request). The reference count is not incremented in these 2 instances:
*
* 1) The ofile is open. An ofile is anchored by his state. If there's
* no activity involving an ofile currently open, the reference count
* of that ofile is zero.
*
* 2) The ofile is queued in the list of ofiles of its tree. The fact of
* being queued in that list is NOT registered by incrementing the
* reference count.
*/
#include <smbsrv/smb_kproto.h>
#include <smbsrv/smb_fsops.h>
static boolean_t smb_ofile_is_open_locked(smb_ofile_t *);
static smb_ofile_t *smb_ofile_close_and_next(smb_ofile_t *);
static int smb_ofile_netinfo_encode(smb_ofile_t *, uint8_t *, size_t,
uint32_t *);
static int smb_ofile_netinfo_init(smb_ofile_t *, smb_netfileinfo_t *);
static void smb_ofile_netinfo_fini(smb_netfileinfo_t *);
/*
 * smb_ofile_open
 *
 * Allocate and initialize an ofile for the file (or pipe) being opened
 * on the caller's tree.  On success the new ofile is inserted in the
 * tree's ofile list in state SMB_OFILE_STATE_OPEN with an initial
 * reference (f_refcnt = 1) owned by the caller.  On failure NULL is
 * returned and *err is filled in with the NT status / DOS error pair
 * to send to the client.
 *
 * Error unwind is driven by the "state" variable: each resource
 * acquired advances it, and the errout switch falls through to undo
 * everything acquired so far, in reverse order.
 */
smb_ofile_t *
smb_ofile_open(
    smb_request_t	*sr,
    smb_node_t		*node,
    struct open_param	*op,
    uint16_t		ftype,
    uint32_t		uniqid,
    smb_error_t		*err)
{
	smb_tree_t	*tree = sr->tid_tree;
	smb_ofile_t	*of;
	uint16_t	fid;
	smb_attr_t	attr;
	int		rc;
	enum errstates { EMPTY, FIDALLOC, CRHELD, MUTEXINIT };
	enum errstates	state = EMPTY;

	/* A FID must be available in the tree's id pool. */
	if (smb_idpool_alloc(&tree->t_fid_pool, &fid)) {
		err->status = NT_STATUS_TOO_MANY_OPENED_FILES;
		err->errcls = ERRDOS;
		err->errcode = ERROR_TOO_MANY_OPEN_FILES;
		return (NULL);
	}
	state = FIDALLOC;

	of = kmem_cache_alloc(smb_cache_ofile, KM_SLEEP);
	bzero(of, sizeof (smb_ofile_t));
	of->f_magic = SMB_OFILE_MAGIC;
	of->f_refcnt = 1;	/* the caller's reference */
	of->f_fid = fid;
	of->f_uniqid = uniqid;
	of->f_opened_by_pid = sr->smb_pid;
	of->f_granted_access = op->desired_access;
	of->f_share_access = op->share_access;
	of->f_create_options = op->create_options;
	/*
	 * Backup-intent opens run with the user's privileged credential;
	 * all other opens use the plain user credential.
	 */
	of->f_cr = (op->create_options & FILE_OPEN_FOR_BACKUP_INTENT) ?
	    smb_user_getprivcred(sr->uid_user) : sr->uid_user->u_cred;
	crhold(of->f_cr);
	state = CRHELD;
	of->f_ftype = ftype;
	of->f_server = tree->t_server;
	of->f_session = tree->t_session;
	/*
	 * grab a ref for of->f_user
	 * released in smb_ofile_delete()
	 */
	smb_user_hold_internal(sr->uid_user);
	of->f_user = sr->uid_user;
	of->f_tree = tree;
	of->f_node = node;

	mutex_init(&of->f_mutex, NULL, MUTEX_DEFAULT, NULL);
	state = MUTEXINIT;
	of->f_state = SMB_OFILE_STATE_OPEN;

	if (ftype == SMB_FTYPE_MESG_PIPE) {
		/* See smb_opipe_open. */
		of->f_pipe = op->pipe;
		smb_server_inc_pipes(of->f_server);
	} else {
		ASSERT(ftype == SMB_FTYPE_DISK); /* Regular file, not a pipe */
		ASSERT(node);

		/*
		 * Note that the common open path often adds bits like
		 * READ_CONTROL, so the logic "is this open exec-only"
		 * needs to look at only the FILE_DATA_ALL bits.
		 */
		if ((of->f_granted_access & FILE_DATA_ALL) == FILE_EXECUTE)
			of->f_flags |= SMB_OFLAGS_EXECONLY;

		/* Fetch owner uid and DOS attributes for the checks below. */
		bzero(&attr, sizeof (smb_attr_t));
		attr.sa_mask = SMB_AT_UID | SMB_AT_DOSATTR;
		rc = smb_node_getattr(NULL, node, of->f_cr, NULL, &attr);
		if (rc != 0) {
			err->status = NT_STATUS_INTERNAL_ERROR;
			err->errcls = ERRDOS;
			err->errcode = ERROR_INTERNAL_ERROR;
			goto errout;
		}

		if (crgetuid(of->f_cr) == attr.sa_vattr.va_uid) {
			/*
			 * Add this bit for the file's owner even if it's not
			 * specified in the request (Windows behavior).
			 */
			of->f_granted_access |= FILE_READ_ATTRIBUTES;
		}

		if (smb_node_is_file(node)) {
			/* Open the underlying file via the fsop layer. */
			of->f_mode =
			    smb_fsop_amask_to_omode(of->f_granted_access);
			if (smb_fsop_open(node, of->f_mode, of->f_cr) != 0) {
				err->status = NT_STATUS_ACCESS_DENIED;
				err->errcls = ERRDOS;
				err->errcode = ERROR_ACCESS_DENIED;
				goto errout;
			}
		}

		if (tree->t_flags & SMB_TREE_READONLY)
			of->f_flags |= SMB_OFLAGS_READONLY;

		/*
		 * Note that if we created_readonly, that
		 * will _not_ yet show in attr.sa_dosattr
		 * so creating a readonly file gives the
		 * caller a writable handle as it should.
		 */
		if (attr.sa_dosattr & FILE_ATTRIBUTE_READONLY)
			of->f_flags |= SMB_OFLAGS_READONLY;

		/* Attach the ofile to the node and bump server counters. */
		smb_node_inc_open_ofiles(node);
		smb_node_add_ofile(node, of);
		smb_node_ref(node);
		smb_server_inc_files(of->f_server);
	}

	/* Publish the ofile on the tree's list. */
	smb_llist_enter(&tree->t_ofile_list, RW_WRITER);
	smb_llist_insert_tail(&tree->t_ofile_list, of);
	smb_llist_exit(&tree->t_ofile_list);
	atomic_inc_32(&tree->t_open_files);
	atomic_inc_32(&of->f_session->s_file_cnt);

	return (of);

errout:
	/* Unwind in reverse acquisition order; cases fall through. */
	switch (state) {
	case MUTEXINIT:
		mutex_destroy(&of->f_mutex);
		smb_user_release(of->f_user);
		/*FALLTHROUGH*/
	case CRHELD:
		crfree(of->f_cr);
		of->f_magic = 0;
		kmem_cache_free(smb_cache_ofile, of);
		/*FALLTHROUGH*/
	case FIDALLOC:
		smb_idpool_free(&tree->t_fid_pool, fid);
		/*FALLTHROUGH*/
	case EMPTY:
		break;
	}
	return (NULL);
}
/*
 * smb_ofile_close
 *
 * Transition an ofile from OPEN to CLOSED (via CLOSING), releasing the
 * underlying pipe or file resources.  The caller must hold a reference
 * on the ofile.  If the ofile is not in state OPEN this is a no-op, so
 * the teardown below runs in exactly one thread.
 *
 * mtime_sec, when non-zero, is a client-supplied mtime to commit as
 * part of the close.
 */
void
smb_ofile_close(smb_ofile_t *of, int32_t mtime_sec)
{
	smb_attr_t *pa;
	timestruc_t now;
	uint32_t flags = 0;

	SMB_OFILE_VALID(of);

	/* Claim the close: OPEN -> CLOSING, or bail if someone beat us. */
	mutex_enter(&of->f_mutex);
	ASSERT(of->f_refcnt);
	if (of->f_state != SMB_OFILE_STATE_OPEN) {
		mutex_exit(&of->f_mutex);
		return;
	}
	of->f_state = SMB_OFILE_STATE_CLOSING;
	mutex_exit(&of->f_mutex);

	switch (of->f_ftype) {
	case SMB_FTYPE_BYTE_PIPE:
	case SMB_FTYPE_MESG_PIPE:
		smb_opipe_close(of);
		smb_server_dec_pipes(of->f_server);
		break;

	case SMB_FTYPE_DISK:
	case SMB_FTYPE_PRINTER:
		/*
		 * In here we make changes to of->f_pending_attr
		 * while not holding of->f_mutex.  This is OK
		 * because we've changed f_state to CLOSING,
		 * so no more threads will take this path.
		 */
		pa = &of->f_pending_attr;
		if (mtime_sec != 0) {
			/* Client-specified mtime wins. */
			pa->sa_vattr.va_mtime.tv_sec = mtime_sec;
			pa->sa_mask |= SMB_AT_MTIME;
		}

		/*
		 * If we have ever modified data via this handle
		 * (write or truncate) and if the mtime was not
		 * set via this handle, update the mtime again
		 * during the close.  Windows expects this.
		 * [ MS-FSA 2.1.5.4 "Update Timestamps" ]
		 */
		if (of->f_written &&
		    (pa->sa_mask & SMB_AT_MTIME) == 0) {
			pa->sa_mask |= SMB_AT_MTIME;
			gethrestime(&now);
			pa->sa_vattr.va_mtime = now;
		}

		if (of->f_flags & SMB_OFLAGS_SET_DELETE_ON_CLOSE) {
			/* CATIA shares need name translation on delete. */
			if (smb_tree_has_feature(of->f_tree,
			    SMB_TREE_CATIA)) {
				flags |= SMB_CATIA;
			}
			(void) smb_node_set_delete_on_close(of->f_node,
			    of->f_cr, flags);
		}
		/* Drop share reservations and byte-range locks we own. */
		smb_fsop_unshrlock(of->f_cr, of->f_node, of->f_uniqid);
		smb_node_destroy_lock_by_ofile(of->f_node, of);

		if (smb_node_is_file(of->f_node)) {
			(void) smb_fsop_close(of->f_node, of->f_mode,
			    of->f_cr);
			smb_oplock_release(of->f_node, of);
		} else {
			/*
			 * If there was an odir, close it.
			 */
			if (of->f_odir != NULL)
				smb_odir_close(of->f_odir);
		}

		if (smb_node_dec_open_ofiles(of->f_node) == 0) {
			/*
			 * Last close.  The f_pending_attr has
			 * only times (atime,ctime,mtime) so
			 * we can borrow it to commit the
			 * n_pending_dosattr from the node.
			 */
			pa->sa_dosattr =
			    of->f_node->n_pending_dosattr;
			if (pa->sa_dosattr != 0)
				pa->sa_mask |= SMB_AT_DOSATTR;
			/* Let's leave this zero when not in use. */
			of->f_node->n_allocsz = 0;
		}
		if (pa->sa_mask != 0) {
			/*
			 * Commit any pending attributes from
			 * the ofile we're closing.  Note that
			 * we pass NULL as the ofile to setattr
			 * so it will write to the file system
			 * and not keep anything on the ofile.
			 * This clears n_pending_dosattr if
			 * there are no opens, otherwise the
			 * dosattr will be pending again.
			 */
			(void) smb_node_setattr(NULL, of->f_node,
			    of->f_cr, NULL, pa);
		}

		/*
		 * Cancel any notify change requests that
		 * may be using this open instance.
		 */
		if (of->f_node->n_fcn.fcn_count)
			smb_notify_file_closed(of);

		smb_server_dec_files(of->f_server);
		break;
	}
	atomic_dec_32(&of->f_tree->t_open_files);

	/* Teardown complete: CLOSING -> CLOSED. */
	mutex_enter(&of->f_mutex);
	ASSERT(of->f_refcnt);
	ASSERT(of->f_state == SMB_OFILE_STATE_CLOSING);
	of->f_state = SMB_OFILE_STATE_CLOSED;
	mutex_exit(&of->f_mutex);
}
/*
 * smb_ofile_close_all
 *
 * Close every ofile queued on the given tree.  The list is walked under
 * a reader lock; smb_ofile_close_and_next() closes the current entry
 * and hands back the next one, so the traversal survives each close.
 */
void
smb_ofile_close_all(
    smb_tree_t *tree)
{
	smb_ofile_t *f;

	ASSERT(tree);
	ASSERT(tree->t_magic == SMB_TREE_MAGIC);

	smb_llist_enter(&tree->t_ofile_list, RW_READER);
	for (f = smb_llist_head(&tree->t_ofile_list); f != NULL;
	    f = smb_ofile_close_and_next(f)) {
		ASSERT(f->f_magic == SMB_OFILE_MAGIC);
		ASSERT(f->f_tree == tree);
	}
	smb_llist_exit(&tree->t_ofile_list);
}
/*
 * smb_ofile_close_all_by_pid
 *
 * Close every ofile on the given tree that was opened by the given
 * process id; all other ofiles are simply skipped over.
 */
void
smb_ofile_close_all_by_pid(
    smb_tree_t *tree,
    uint16_t pid)
{
	smb_ofile_t *f;

	ASSERT(tree);
	ASSERT(tree->t_magic == SMB_TREE_MAGIC);

	smb_llist_enter(&tree->t_ofile_list, RW_READER);
	f = smb_llist_head(&tree->t_ofile_list);
	while (f != NULL) {
		ASSERT(f->f_magic == SMB_OFILE_MAGIC);
		ASSERT(f->f_tree == tree);
		if (f->f_opened_by_pid != pid) {
			/* Not ours: just step to the next entry. */
			f = smb_llist_next(&tree->t_ofile_list, f);
			continue;
		}
		f = smb_ofile_close_and_next(f);
	}
	smb_llist_exit(&tree->t_ofile_list);
}
/*
 * If the enumeration request is for ofile data, handle it here.
 * Otherwise, return.
 *
 * Honours the enumerator's skip count and item limit before encoding
 * this ofile's netinfo record into the enumeration buffer.
 *
 * This function should be called with a hold on the ofile.
 */
int
smb_ofile_enum(smb_ofile_t *of, smb_svcenum_t *svcenum)
{
	uint8_t		*buf;
	uint_t		used;
	int		rc;

	ASSERT(of);
	ASSERT(of->f_magic == SMB_OFILE_MAGIC);
	ASSERT(of->f_refcnt);

	/* Only file enumerations are handled here. */
	if (svcenum->se_type != SMB_SVCENUM_TYPE_FILE)
		return (0);

	/* Consume the skip count before emitting anything. */
	if (svcenum->se_nskip > 0) {
		svcenum->se_nskip--;
		return (0);
	}

	/* Clamp at the item limit. */
	if (svcenum->se_nitems >= svcenum->se_nlimit) {
		svcenum->se_nitems = svcenum->se_nlimit;
		return (0);
	}

	buf = &svcenum->se_buf[svcenum->se_bused];
	rc = smb_ofile_netinfo_encode(of, buf, svcenum->se_bavail, &used);
	if (rc == 0) {
		/* Record accepted: advance the buffer accounting. */
		svcenum->se_bavail -= used;
		svcenum->se_bused += used;
		svcenum->se_nitems++;
	}

	return (rc);
}
/*
* Take a reference on an open file.
*/
/*
 * Take a reference on an open file.
 * Fails (returns B_FALSE) when the ofile is no longer in OPEN state.
 */
boolean_t
smb_ofile_hold(smb_ofile_t *of)
{
    boolean_t held = B_FALSE;

    ASSERT(of);
    ASSERT(of->f_magic == SMB_OFILE_MAGIC);

    mutex_enter(&of->f_mutex);
    if (of->f_state == SMB_OFILE_STATE_OPEN) {
        of->f_refcnt++;
        held = B_TRUE;
    }
    mutex_exit(&of->f_mutex);

    return (held);
}
/*
* Release a reference on a file. If the reference count falls to
* zero and the file has been closed, post the object for deletion.
* Object deletion is deferred to avoid modifying a list while an
* iteration may be in progress.
*/
/*
 * Drop a reference on a file.  When the last reference on a CLOSED
 * ofile goes away, post it to the tree for deferred deletion (deletion
 * is deferred so list iterators are never invalidated mid-walk).
 */
void
smb_ofile_release(smb_ofile_t *of)
{
    SMB_OFILE_VALID(of);

    mutex_enter(&of->f_mutex);
    ASSERT(of->f_refcnt);
    of->f_refcnt--;

    if (of->f_state == SMB_OFILE_STATE_CLOSED) {
        if (of->f_refcnt == 0)
            smb_tree_post_ofile(of->f_tree, of);
    } else if (of->f_state != SMB_OFILE_STATE_OPEN &&
        of->f_state != SMB_OFILE_STATE_CLOSING) {
        /* Any other state is a programming error. */
        ASSERT(0);
    }
    mutex_exit(&of->f_mutex);
}
/*
* smb_ofile_request_complete
*
* During oplock acquisition, all other oplock requests on the node
* are blocked until the acquire request completes and the response
* is on the wire.
* Call smb_oplock_broadcast to notify the node that the request
* has completed.
*
* THIS MECHANISM RELIES ON THE FACT THAT THE OFILE IS NOT REMOVED
* FROM THE SR UNTIL REQUEST COMPLETION (when the sr is destroyed)
*/
void
smb_ofile_request_complete(smb_ofile_t *of)
{
    SMB_OFILE_VALID(of);

    /* Only disk files carry oplock state to broadcast. */
    if (of->f_ftype == SMB_FTYPE_DISK) {
        ASSERT(of->f_node);
        smb_oplock_broadcast(of->f_node);
    }
    /* Pipes and other file types need no completion work. */
}
/*
* smb_ofile_lookup_by_fid
*
* Find the open file whose fid matches the one specified in the request.
* If we can't find the fid or the shares (trees) don't match, we have a
* bad fid.
*/
smb_ofile_t *
smb_ofile_lookup_by_fid(
    smb_request_t *sr,
    uint16_t fid)
{
    smb_tree_t *tree = sr->tid_tree;
    smb_llist_t *of_list;
    smb_ofile_t *of;

    ASSERT(tree->t_magic == SMB_TREE_MAGIC);

    of_list = &tree->t_ofile_list;

    /* Linear scan of the tree's open-file list for a matching fid. */
    smb_llist_enter(of_list, RW_READER);
    of = smb_llist_head(of_list);
    while (of) {
        ASSERT(of->f_magic == SMB_OFILE_MAGIC);
        ASSERT(of->f_tree == tree);
        if (of->f_fid == fid)
            break;
        of = smb_llist_next(of_list, of);
    }
    if (of == NULL)
        goto out;

    /*
     * Only allow use of a given FID with the same UID that
     * was used to open it. MS-CIFS 3.3.5.14
     */
    if (of->f_user != sr->uid_user) {
        of = NULL;
        goto out;
    }

    /*
     * Take a hold only if the ofile is still OPEN; a CLOSING/CLOSED
     * ofile is treated as a bad fid.  Caller is expected to release
     * the hold (see smb_ofile_release).
     */
    mutex_enter(&of->f_mutex);
    if (of->f_state != SMB_OFILE_STATE_OPEN) {
        mutex_exit(&of->f_mutex);
        of = NULL;
        goto out;
    }
    of->f_refcnt++;
    mutex_exit(&of->f_mutex);
out:
    smb_llist_exit(of_list);
    return (of);
}
/*
* smb_ofile_lookup_by_uniqid
*
* Find the open file whose uniqid matches the one specified in the request.
*/
/*
 * Find the open file whose uniqid matches, returning it with a hold
 * taken (via smb_ofile_hold); NULL when no open match exists.
 */
smb_ofile_t *
smb_ofile_lookup_by_uniqid(smb_tree_t *tree, uint32_t uniqid)
{
    smb_llist_t *list;
    smb_ofile_t *f;

    ASSERT(tree->t_magic == SMB_TREE_MAGIC);

    list = &tree->t_ofile_list;

    smb_llist_enter(list, RW_READER);
    for (f = smb_llist_head(list); f != NULL;
        f = smb_llist_next(list, f)) {
        ASSERT(f->f_magic == SMB_OFILE_MAGIC);
        ASSERT(f->f_tree == tree);
        /* Skip a matching ofile that can no longer be held. */
        if (f->f_uniqid == uniqid && smb_ofile_hold(f)) {
            smb_llist_exit(list);
            return (f);
        }
    }
    smb_llist_exit(list);

    return (NULL);
}
/*
* Disallow NetFileClose on certain ofiles to avoid side-effects.
* Closing a tree root is not allowed: use NetSessionDel or NetShareDel.
* Closing SRVSVC connections is not allowed because this NetFileClose
* request may depend on this ofile.
*/
/*
 * Report whether NetFileClose must be refused for this ofile:
 * tree roots and SRVSVC pipe connections cannot be force-closed.
 */
boolean_t
smb_ofile_disallow_fclose(smb_ofile_t *of)
{
    ASSERT(of);
    ASSERT(of->f_magic == SMB_OFILE_MAGIC);
    ASSERT(of->f_refcnt);

    if (of->f_ftype == SMB_FTYPE_DISK) {
        /* Disallow closing the share root node. */
        ASSERT(of->f_tree);
        return (of->f_node == of->f_tree->t_snode);
    }

    if (of->f_ftype == SMB_FTYPE_MESG_PIPE) {
        /* Disallow closing SRVSVC pipes. */
        ASSERT(of->f_pipe);
        return (smb_strcasecmp(of->f_pipe->p_name, "SRVSVC", 0) == 0);
    }

    return (B_FALSE);
}
/*
 * smb_ofile_set_flags
 *
 * OR the given flag bits into the ofile's f_flags.
 * (No return value; bits are never cleared here.)
 */
void
smb_ofile_set_flags(
    smb_ofile_t *of,
    uint32_t flags)
{
    ASSERT(of);
    ASSERT(of->f_magic == SMB_OFILE_MAGIC);
    ASSERT(of->f_refcnt);

    /* OR the new bits in under the ofile mutex; nothing is cleared. */
    mutex_enter(&of->f_mutex);
    of->f_flags |= flags;
    mutex_exit(&of->f_mutex);
}
/*
* smb_ofile_seek
*
* Return value:
*
* 0 Success
* EINVAL Unknown mode
* EOVERFLOW offset too big
*
*/
/*
 * smb_ofile_seek
 *
 * Compute and store a new 32-bit seek position for the ofile.
 *
 * Return value:
 *
 *	0		Success (*retoff holds the new offset)
 *	EINVAL		Unknown mode
 *	EOVERFLOW	offset too big (does not fit in 32 bits)
 *	other		smb_fsop_getattr() error (SMB_SEEK_END only)
 */
int
smb_ofile_seek(
    smb_ofile_t *of,
    ushort_t mode,
    int32_t off,
    uint32_t *retoff)
{
    u_offset_t newoff = 0;
    int rc = 0;
    smb_attr_t attr;
    /*
     * Widen before negating: computing -off directly would be
     * undefined behavior (signed overflow) when off == INT32_MIN.
     */
    int64_t soff = off;

    ASSERT(of);
    ASSERT(of->f_magic == SMB_OFILE_MAGIC);
    ASSERT(of->f_refcnt);

    mutex_enter(&of->f_mutex);
    switch (mode) {
    case SMB_SEEK_SET:
        /* Absolute position; clamp negative offsets to zero. */
        if (soff < 0)
            newoff = 0;
        else
            newoff = (u_offset_t)soff;
        break;

    case SMB_SEEK_CUR:
        /* Relative to the current position; clamp underflow to zero. */
        if (soff < 0 && (u_offset_t)(-soff) > of->f_seek_pos)
            newoff = 0;
        else
            newoff = of->f_seek_pos + (u_offset_t)soff;
        break;

    case SMB_SEEK_END:
        /* Relative to EOF; fetch the current file size first. */
        bzero(&attr, sizeof (smb_attr_t));
        attr.sa_mask |= SMB_AT_SIZE;
        rc = smb_fsop_getattr(NULL, zone_kcred(), of->f_node, &attr);
        if (rc != 0) {
            mutex_exit(&of->f_mutex);
            return (rc);
        }
        if (soff < 0 && (u_offset_t)(-soff) > attr.sa_vattr.va_size)
            newoff = 0;
        else
            newoff = attr.sa_vattr.va_size + (u_offset_t)soff;
        break;

    default:
        mutex_exit(&of->f_mutex);
        return (EINVAL);
    }

    /*
     * See comments at the beginning of smb_seek.c.
     * If the offset is greater than UINT_MAX, we will return an error.
     */
    if (newoff > UINT_MAX) {
        rc = EOVERFLOW;
    } else {
        of->f_seek_pos = newoff;
        *retoff = (uint32_t)newoff;
    }
    mutex_exit(&of->f_mutex);
    return (rc);
}
/*
* smb_ofile_flush
*
* If writes on this file are not synchronous, flush it using the NFSv3
* commit interface.
*
* XXX - todo: Flush named pipe should drain writes.
*/
void
smb_ofile_flush(struct smb_request *sr, struct smb_ofile *of)
{
    /* Only disk files are flushed; other types are a no-op. */
    if (of->f_ftype != SMB_FTYPE_DISK)
        return;

    /* Write-through files are already synchronous; nothing to commit. */
    if ((of->f_node->flags & NODE_FLAGS_WRITE_THROUGH) == 0)
        (void) smb_fsop_commit(sr, of->f_cr, of->f_node);
}
/*
* smb_ofile_is_open
*/
/* Locked wrapper: test whether the ofile is currently open. */
boolean_t
smb_ofile_is_open(smb_ofile_t *of)
{
    boolean_t is_open;

    SMB_OFILE_VALID(of);

    mutex_enter(&of->f_mutex);
    is_open = smb_ofile_is_open_locked(of);
    mutex_exit(&of->f_mutex);

    return (is_open);
}
/* *************************** Static Functions ***************************** */
/*
* Determine whether or not an ofile is open.
* This function must be called with the mutex held.
*/
/*
 * Determine whether or not an ofile is open.
 * This function must be called with the mutex held.
 */
static boolean_t
smb_ofile_is_open_locked(smb_ofile_t *of)
{
    if (of->f_state == SMB_OFILE_STATE_OPEN)
        return (B_TRUE);

    /* CLOSING/CLOSED are the only other legal states. */
    if (of->f_state != SMB_OFILE_STATE_CLOSING &&
        of->f_state != SMB_OFILE_STATE_CLOSED)
        ASSERT(0);

    return (B_FALSE);
}
/*
* This function closes the file passed in (if appropriate) and returns the
* next open file in the list of open files of the tree of the open file passed
* in. It requires that the list of open files of the tree be entered in
* RW_READER mode before being called.
*/
static smb_ofile_t *
smb_ofile_close_and_next(smb_ofile_t *of)
{
    smb_ofile_t *next_of;
    smb_tree_t *tree;

    ASSERT(of);
    ASSERT(of->f_magic == SMB_OFILE_MAGIC);

    mutex_enter(&of->f_mutex);
    switch (of->f_state) {
    case SMB_OFILE_STATE_OPEN:
        /* The file is still open. */
        of->f_refcnt++;  /* hold so the ofile can't vanish unlocked */
        ASSERT(of->f_refcnt);
        tree = of->f_tree;  /* saved: needed after the list lock drops */
        mutex_exit(&of->f_mutex);
        /*
         * Drop the list lock before closing; smb_ofile_close()
         * must not run with the ofile list held.  Because the
         * list may change while unlocked, iteration restarts
         * from the list head afterwards.
         */
        smb_llist_exit(&of->f_tree->t_ofile_list);
        smb_ofile_close(of, 0);
        smb_ofile_release(of);
        smb_llist_enter(&tree->t_ofile_list, RW_READER);
        next_of = smb_llist_head(&tree->t_ofile_list);
        break;
    case SMB_OFILE_STATE_CLOSING:
    case SMB_OFILE_STATE_CLOSED:
        /*
         * The ofile exists but is closed or
         * in the process being closed.
         */
        mutex_exit(&of->f_mutex);
        next_of = smb_llist_next(&of->f_tree->t_ofile_list, of);
        break;
    default:
        ASSERT(0);
        mutex_exit(&of->f_mutex);
        next_of = smb_llist_next(&of->f_tree->t_ofile_list, of);
        break;
    }
    return (next_of);
}
/*
* Delete an ofile.
*
* Remove the ofile from the tree list before freeing resources
* associated with the ofile.
*/
void
smb_ofile_delete(void *arg)
{
    smb_tree_t *tree;
    smb_ofile_t *of = (smb_ofile_t *)arg;

    SMB_OFILE_VALID(of);
    ASSERT(of->f_refcnt == 0);
    ASSERT(of->f_state == SMB_OFILE_STATE_CLOSED);
    ASSERT(!SMB_OFILE_OPLOCK_GRANTED(of));

    /* Unlink from the tree list and retire the fid before teardown. */
    tree = of->f_tree;
    smb_llist_enter(&tree->t_ofile_list, RW_WRITER);
    smb_llist_remove(&tree->t_ofile_list, of);
    smb_idpool_free(&tree->t_fid_pool, of->f_fid);
    atomic_dec_32(&tree->t_session->s_file_cnt);
    smb_llist_exit(&tree->t_ofile_list);

    /*
     * NOTE(review): empty enter/exit pair appears to act as a
     * barrier ensuring no other thread still holds f_mutex before
     * the ofile is destroyed -- confirm against callers.
     */
    mutex_enter(&of->f_mutex);
    mutex_exit(&of->f_mutex);

    /* Release the per-type resources (pipe vs. disk node). */
    switch (of->f_ftype) {
    case SMB_FTYPE_BYTE_PIPE:
    case SMB_FTYPE_MESG_PIPE:
        smb_opipe_dealloc(of->f_pipe);
        of->f_pipe = NULL;
        break;
    case SMB_FTYPE_DISK:
        if (of->f_odir != NULL)
            smb_odir_release(of->f_odir);
        smb_node_rem_ofile(of->f_node, of);
        smb_node_release(of->f_node);
        break;
    default:
        ASSERT(!"f_ftype");
        break;
    }

    /* Invalidate the magic so stale pointers trip SMB_OFILE_VALID. */
    of->f_magic = (uint32_t)~SMB_OFILE_MAGIC;
    mutex_destroy(&of->f_mutex);
    crfree(of->f_cr);
    smb_user_release(of->f_user);
    kmem_cache_free(smb_cache_ofile, of);
}
/*
* smb_ofile_access
*
* This function will check to see if the access requested is granted.
* Returns NT status codes.
*/
/*
 * Check the requested access against what was granted on this ofile.
 * Returns NT status codes.
 */
uint32_t
smb_ofile_access(smb_ofile_t *of, cred_t *cr, uint32_t access)
{
    uint32_t missing;

    /* A NULL ofile or the kernel credential is granted everything. */
    if (of == NULL || cr == zone_kcred())
        return (NT_STATUS_SUCCESS);

    missing = access & ~of->f_granted_access;
    if (missing == 0)
        return (NT_STATUS_SUCCESS);

    /* SACL access requires a privilege, not a share-level grant. */
    if ((access & ACCESS_SYSTEM_SECURITY) != 0 &&
        (of->f_granted_access & ACCESS_SYSTEM_SECURITY) == 0)
        return (NT_STATUS_PRIVILEGE_NOT_HELD);

    return (NT_STATUS_ACCESS_DENIED);
}
/*
* smb_ofile_share_check
*
* Check if ofile was opened with share access NONE (0).
* Returns: B_TRUE - share access non-zero
* B_FALSE - share access NONE
*/
boolean_t
smb_ofile_share_check(smb_ofile_t *of)
{
    /* B_FALSE when the ofile was opened with share access NONE. */
    if (SMB_DENY_ALL(of->f_share_access))
        return (B_FALSE);
    return (B_TRUE);
}
/*
* check file sharing rules for current open request
* against existing open instances of the same file
*
* Returns NT_STATUS_SHARING_VIOLATION if there is any
* sharing conflict, otherwise returns NT_STATUS_SUCCESS.
*/
/*
 * Check the sharing rules for a new open request against this
 * existing open instance of the same file.
 *
 * Returns NT_STATUS_SHARING_VIOLATION on any conflict,
 * NT_STATUS_INVALID_HANDLE if this ofile is no longer open,
 * otherwise NT_STATUS_SUCCESS.
 */
uint32_t
smb_ofile_open_check(smb_ofile_t *of, uint32_t desired_access,
    uint32_t share_access)
{
    uint32_t status = NT_STATUS_SHARING_VIOLATION;

    ASSERT(of->f_magic == SMB_OFILE_MAGIC);

    mutex_enter(&of->f_mutex);

    if (of->f_state != SMB_OFILE_STATE_OPEN) {
        status = NT_STATUS_INVALID_HANDLE;
        goto out;
    }

    /* Meta-data-only opens never conflict. */
    if ((of->f_granted_access & FILE_DATA_ALL) == 0) {
        status = NT_STATUS_SUCCESS;
        goto out;
    }

    /* New share modes vs. access already granted on this open. */
    if (SMB_DENY_DELETE(share_access) &&
        (of->f_granted_access & DELETE))
        goto out;
    if (SMB_DENY_READ(share_access) &&
        (of->f_granted_access & (FILE_READ_DATA | FILE_EXECUTE)))
        goto out;
    if (SMB_DENY_WRITE(share_access) &&
        (of->f_granted_access & (FILE_WRITE_DATA | FILE_APPEND_DATA)))
        goto out;

    /* New desired access vs. share modes of this open. */
    if (SMB_DENY_DELETE(of->f_share_access) &&
        (desired_access & DELETE))
        goto out;
    if (SMB_DENY_READ(of->f_share_access) &&
        (desired_access & (FILE_READ_DATA | FILE_EXECUTE)))
        goto out;
    if (SMB_DENY_WRITE(of->f_share_access) &&
        (desired_access & (FILE_WRITE_DATA | FILE_APPEND_DATA)))
        goto out;

    status = NT_STATUS_SUCCESS;
out:
    mutex_exit(&of->f_mutex);
    return (status);
}
/*
* smb_ofile_rename_check
*
* An open file can be renamed if
*
* 1. isn't opened for data writing or deleting
*
* 2. Opened with "Deny Delete" share mode
* But not opened for data reading or executing
* (opened for accessing meta data)
*/
/*
 * An open file can be renamed when it is not open for data writing
 * or deleting, and -- if opened without FILE_SHARE_DELETE -- not
 * open for data reading or executing either.
 */
uint32_t
smb_ofile_rename_check(smb_ofile_t *of)
{
    uint32_t status = NT_STATUS_SUCCESS;

    ASSERT(of->f_magic == SMB_OFILE_MAGIC);

    mutex_enter(&of->f_mutex);

    if (of->f_state != SMB_OFILE_STATE_OPEN) {
        status = NT_STATUS_INVALID_HANDLE;
    } else if (of->f_granted_access &
        (FILE_WRITE_DATA | FILE_APPEND_DATA | DELETE)) {
        status = NT_STATUS_SHARING_VIOLATION;
    } else if ((of->f_share_access & FILE_SHARE_DELETE) == 0 &&
        (of->f_granted_access & (FILE_READ_DATA | FILE_EXECUTE))) {
        status = NT_STATUS_SHARING_VIOLATION;
    }

    mutex_exit(&of->f_mutex);
    return (status);
}
/*
* smb_ofile_delete_check
*
* An open file can be deleted only if opened for
* accessing meta data. Share modes aren't important
* in this case.
*
* NOTE: there is another mechanism for deleting an
* open file that NT clients usually use.
* That's setting "Delete on close" flag for an open
* file. In this way the file will be deleted after
* last close. This flag can be set by SmbTrans2SetFileInfo
* with FILE_DISPOSITION_INFO information level.
* For setting this flag, the file should be opened by
* DELETE access in the FID that is passed in the Trans2
* request.
*/
uint32_t
smb_ofile_delete_check(smb_ofile_t *of)
{
    uint32_t status = NT_STATUS_SUCCESS;

    ASSERT(of->f_magic == SMB_OFILE_MAGIC);

    mutex_enter(&of->f_mutex);

    if (of->f_state != SMB_OFILE_STATE_OPEN) {
        status = NT_STATUS_INVALID_HANDLE;
    } else if (of->f_granted_access &
        (FILE_READ_DATA | FILE_WRITE_DATA |
        FILE_APPEND_DATA | FILE_EXECUTE | DELETE)) {
        /* Any data/delete access blocks deletion of an open file. */
        status = NT_STATUS_SHARING_VIOLATION;
    }

    mutex_exit(&of->f_mutex);
    return (status);
}
/*
 * Return the credentials associated with this ofile.
 * The cred remains owned by the ofile; no hold is taken here.
 */
cred_t *
smb_ofile_getcred(smb_ofile_t *of)
{
    return (of->f_cr);
}
/*
* smb_ofile_set_delete_on_close
*
* Set the DeleteOnClose flag on the smb file. When the file is closed,
* the flag will be transferred to the smb node, which will commit the
* delete operation and inhibit subsequent open requests.
*
* When DeleteOnClose is set on an smb_node, the common open code will
* reject subsequent open requests for the file. Observation of Windows
* 2000 indicates that subsequent opens should be allowed (assuming
* there would be no sharing violation) until the file is closed using
* the fid on which the DeleteOnClose was requested.
*/
void
smb_ofile_set_delete_on_close(smb_ofile_t *of)
{
    /* Set the flag under the ofile mutex; it is never cleared here. */
    mutex_enter(&of->f_mutex);
    of->f_flags |= SMB_OFLAGS_SET_DELETE_ON_CLOSE;
    mutex_exit(&of->f_mutex);
}
/*
* Encode open file information into a buffer; needed in user space to
* support RPC requests.
*/
/*
 * Encode open file information into a buffer; needed in user space to
 * support RPC requests.  Returns 0 on success, or the first error
 * from init/encode.
 */
static int
smb_ofile_netinfo_encode(smb_ofile_t *of, uint8_t *buf, size_t buflen,
    uint32_t *nbytes)
{
    smb_netfileinfo_t fi;
    int rc;

    rc = smb_ofile_netinfo_init(of, &fi);
    if (rc != 0)
        return (rc);

    rc = smb_netfileinfo_encode(&fi, buf, buflen, nbytes);
    smb_ofile_netinfo_fini(&fi);
    return (rc);
}
/*
 * Populate a smb_netfileinfo_t from an ofile: path ("share:path" for
 * disk files, "\PIPE\name" for pipes), permissions, lock count and the
 * "DOMAIN\user" name.  Pair with smb_ofile_netinfo_fini().
 */
static int
smb_ofile_netinfo_init(smb_ofile_t *of, smb_netfileinfo_t *fi)
{
    smb_user_t *user;
    smb_tree_t *tree;
    smb_node_t *node;
    char *path;
    char *buf;
    int rc;

    ASSERT(of);
    user = of->f_user;
    tree = of->f_tree;
    ASSERT(user);
    ASSERT(tree);

    buf = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

    switch (of->f_ftype) {
    case SMB_FTYPE_DISK:
        node = of->f_node;
        ASSERT(node);
        fi->fi_permissions = of->f_granted_access;
        fi->fi_numlocks = smb_lock_get_lock_count(node, of);
        /* kmem_zalloc: path stays "" for the share-root node. */
        path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
        if (node != tree->t_snode) {
            /* Fall back to the node name if no share path. */
            rc = smb_node_getshrpath(node, tree, path, MAXPATHLEN);
            if (rc != 0)
                (void) strlcpy(path, node->od_name, MAXPATHLEN);
        }
        (void) snprintf(buf, MAXPATHLEN, "%s:%s", tree->t_sharename,
            path);
        kmem_free(path, MAXPATHLEN);
        break;
    case SMB_FTYPE_MESG_PIPE:
        ASSERT(of->f_pipe);
        fi->fi_permissions = FILE_READ_DATA | FILE_WRITE_DATA |
            FILE_EXECUTE;
        fi->fi_numlocks = 0;
        (void) snprintf(buf, MAXPATHLEN, "\\PIPE\\%s",
            of->f_pipe->p_name);
        break;
    default:
        /* Unknown type: nothing to report. */
        kmem_free(buf, MAXPATHLEN);
        return (-1);
    }

    fi->fi_fid = of->f_fid;
    fi->fi_uniqid = of->f_uniqid;
    fi->fi_pathlen = strlen(buf) + 1;
    fi->fi_path = smb_mem_strdup(buf);
    kmem_free(buf, MAXPATHLEN);

    /* +2: one for the '\\' separator, one for the terminating NUL. */
    fi->fi_namelen = user->u_domain_len + user->u_name_len + 2;
    fi->fi_username = kmem_alloc(fi->fi_namelen, KM_SLEEP);
    (void) snprintf(fi->fi_username, fi->fi_namelen, "%s\\%s",
        user->u_domain, user->u_name);
    return (0);
}
/* Release the strings held by a smb_netfileinfo_t and zero it. */
static void
smb_ofile_netinfo_fini(smb_netfileinfo_t *fi)
{
    if (fi == NULL)
        return;

    if (fi->fi_path != NULL)
        smb_mem_free(fi->fi_path);
    if (fi->fi_username != NULL)
        kmem_free(fi->fi_username, fi->fi_namelen);

    bzero(fi, sizeof (smb_netfileinfo_t));
}
/*
* A query of user and group quotas may span multiple requests.
* f_quota_resume is used to determine where the query should
* be resumed, in a subsequent request. f_quota_resume contains
* the SID of the last quota entry returned to the client.
*/
/*
 * Record the SID string of the last quota entry returned, so a
 * subsequent quota query can resume after it.  NULL clears the
 * resume state.
 */
void
smb_ofile_set_quota_resume(smb_ofile_t *ofile, char *resume)
{
    ASSERT(ofile);

    mutex_enter(&ofile->f_mutex);
    if (resume == NULL) {
        bzero(ofile->f_quota_resume, SMB_SID_STRSZ);
    } else {
        (void) strlcpy(ofile->f_quota_resume, resume,
            SMB_SID_STRSZ);
    }
    mutex_exit(&ofile->f_mutex);
}
/* Copy the saved quota resume SID string into the caller's buffer. */
void
smb_ofile_get_quota_resume(smb_ofile_t *ofile, char *buf, int bufsize)
{
    ASSERT(ofile);

    mutex_enter(&ofile->f_mutex);
    (void) strlcpy(buf, ofile->f_quota_resume, bufsize);
    mutex_exit(&ofile->f_mutex);
}
/*
* Packet matching code.
*
* Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
* Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
* Copyright (C) 2006-2010 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/kmod.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/icmp.h>
#include <net/ip.h>
#include <net/compat.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/err.h>
#include <linux/cpumask.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <net/netfilter/nf_log.h>
#include "../../netfilter/xt_repldata.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("IPv4 packet filter");
/* Allocate the initial (built-in chains) table replica. */
void *ipt_alloc_initial_table(const struct xt_table *info)
{
	/*
	 * xt_alloc_initial_table() is a macro from xt_repldata.h;
	 * the "ipt"/"IPT" tokens select the IPv4 structure names.
	 */
	return xt_alloc_initial_table(ipt, IPT);
}
EXPORT_SYMBOL_GPL(ipt_alloc_initial_table);
/* Returns whether matches rule or not. */
/* Performance critical - called for every packet */
static inline bool
ip_packet_match(const struct iphdr *ip,
		const char *indev,
		const char *outdev,
		const struct ipt_ip *ipinfo,
		int isfrag)
{
	unsigned long ret;

	/* Masked source/destination address, honouring inversion flags. */
	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
		    (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
	    NF_INVF(ipinfo, IPT_INV_DSTIP,
		    (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
		return false;

	/* Interface names, compared word-at-a-time under the iface mask. */
	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);

	if (NF_INVF(ipinfo, IPT_INV_VIA_IN, ret != 0))
		return false;

	ret = ifname_compare_aligned(outdev, ipinfo->outiface, ipinfo->outiface_mask);

	if (NF_INVF(ipinfo, IPT_INV_VIA_OUT, ret != 0))
		return false;

	/* Check specific protocol */
	if (ipinfo->proto &&
	    NF_INVF(ipinfo, IPT_INV_PROTO, ip->protocol != ipinfo->proto))
		return false;

	/* If we have a fragment rule but the packet is not a fragment
	 * then we return zero */
	if (NF_INVF(ipinfo, IPT_INV_FRAG,
		    (ipinfo->flags & IPT_F_FRAG) && !isfrag))
		return false;

	return true;
}
/* Validate user-supplied rule flags: reject bits outside the masks. */
static bool
ip_checkentry(const struct ipt_ip *ip)
{
	return !(ip->flags & ~IPT_F_MASK) &&
	       !(ip->invflags & ~IPT_INV_MASK);
}
/*
 * Handler for the built-in ERROR target: log (rate-limited) the
 * name carried in targinfo and drop the packet.
 */
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);
	return NF_DROP;
}
/* Performance critical */
static inline struct ipt_entry *
get_entry(const void *base, unsigned int offset)
{
	/* Entries are addressed by byte offset from the start of the blob. */
	return (struct ipt_entry *)(base + offset);
}
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
static inline bool unconditional(const struct ipt_entry *e)
{
	static const struct ipt_ip uncond;

	/*
	 * Unconditional == the target immediately follows the entry
	 * header (no matches) and the ipt_ip selector is all-zero.
	 */
	return e->target_offset == sizeof(struct ipt_entry) &&
	       memcmp(&e->ip, &uncond, sizeof(uncond)) == 0;
}
/* for const-correctness */
static inline const struct xt_entry_target *
ipt_get_target_c(const struct ipt_entry *e)
{
	/* Cast away const only to reuse the non-const accessor. */
	return ipt_get_target((struct ipt_entry *)e);
}
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
static const char *const hooknames[] = {
[NF_INET_PRE_ROUTING] = "PREROUTING",
[NF_INET_LOCAL_IN] = "INPUT",
[NF_INET_FORWARD] = "FORWARD",
[NF_INET_LOCAL_OUT] = "OUTPUT",
[NF_INET_POST_ROUTING] = "POSTROUTING",
};
enum nf_ip_trace_comments {
NF_IP_TRACE_COMMENT_RULE,
NF_IP_TRACE_COMMENT_RETURN,
NF_IP_TRACE_COMMENT_POLICY,
};
static const char *const comments[] = {
[NF_IP_TRACE_COMMENT_RULE] = "rule",
[NF_IP_TRACE_COMMENT_RETURN] = "return",
[NF_IP_TRACE_COMMENT_POLICY] = "policy",
};
static const struct nf_loginfo trace_loginfo = {
.type = NF_LOG_TYPE_LOG,
.u = {
.log = {
.level = 4,
.logflags = NF_LOG_DEFAULT_MASK,
},
},
};
/* Mildly perf critical (only if packet tracing is on) */
/*
 * Track the chain name and rule number while walking toward entry e.
 * Returns 1 once e has been reached (outputs are then final), 0 to
 * keep iterating.
 */
static inline int
get_chainname_rulenum(const struct ipt_entry *s, const struct ipt_entry *e,
		      const char *hookname, const char **chainname,
		      const char **comment, unsigned int *rulenum)
{
	const struct xt_standard_target *t = (void *)ipt_get_target_c(s);

	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
		/* Head of user chain: ERROR target with chainname */
		*chainname = t->target.data;
		(*rulenum) = 0;
	} else if (s == e) {
		(*rulenum)++;

		if (unconditional(s) &&
		    strcmp(t->target.u.kernel.target->name,
			   XT_STANDARD_TARGET) == 0 &&
		    t->verdict < 0) {
			/* Tail of chains: STANDARD target (return/policy) */
			*comment = *chainname == hookname
				? comments[NF_IP_TRACE_COMMENT_POLICY]
				: comments[NF_IP_TRACE_COMMENT_RETURN];
		}
		return 1;
	} else
		(*rulenum)++;

	return 0;
}
/*
 * Emit a TRACE log line identifying the table/chain/rule that matched
 * this packet.  Walks the hook's chain to resolve chain name and rule
 * number for entry e.
 */
static void trace_packet(struct net *net,
			 const struct sk_buff *skb,
			 unsigned int hook,
			 const struct net_device *in,
			 const struct net_device *out,
			 const char *tablename,
			 const struct xt_table_info *private,
			 const struct ipt_entry *e)
{
	const struct ipt_entry *root;
	const char *hookname, *chainname, *comment;
	const struct ipt_entry *iter;
	unsigned int rulenum = 0;

	root = get_entry(private->entries, private->hook_entry[hook]);
	hookname = chainname = hooknames[hook];
	comment = comments[NF_IP_TRACE_COMMENT_RULE];

	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
		if (get_chainname_rulenum(iter, e, hookname,
		    &chainname, &comment, &rulenum) != 0)
			break;

	nf_log_trace(net, AF_INET, hook, skb, in, out, &trace_loginfo,
		     "TRACE: %s:%s:%s:%u ",
		     tablename, chainname, comment, rulenum);
}
#endif
static inline
struct ipt_entry *ipt_next_entry(const struct ipt_entry *entry)
{
	/* next_offset is the full size of this entry (matches + target). */
	return (void *)entry + entry->next_offset;
}
/* Returns one of the generic firewall policies, like NF_ACCEPT. */
unsigned int
ipt_do_table(struct sk_buff *skb,
	     const struct nf_hook_state *state,
	     struct xt_table *table)
{
	unsigned int hook = state->hook;
	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
	const struct iphdr *ip;
	/* Initializing verdict to NF_DROP keeps gcc happy. */
	unsigned int verdict = NF_DROP;
	const char *indev, *outdev;
	const void *table_base;
	struct ipt_entry *e, **jumpstack;
	unsigned int stackidx, cpu;
	const struct xt_table_info *private;
	struct xt_action_param acpar;
	unsigned int addend;

	/* Initialization */
	stackidx = 0;
	ip = ip_hdr(skb);
	indev = state->in ? state->in->name : nulldevname;
	outdev = state->out ? state->out->name : nulldevname;
	/* We handle fragments by dealing with the first fragment as
	 * if it was a normal packet. All other fragments are treated
	 * normally, except that they will NEVER match rules that ask
	 * things we don't know, ie. tcp syn flag or ports). If the
	 * rule is also a fragment-specific rule, non-fragments won't
	 * match it. */
	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
	acpar.thoff = ip_hdrlen(skb);
	acpar.hotdrop = false;
	acpar.state = state;

	WARN_ON(!(table->valid_hooks & (1 << hook)));
	local_bh_disable();
	/* recseq brackets the counter updates for seqcount readers. */
	addend = xt_write_recseq_begin();
	private = READ_ONCE(table->private); /* Address dependency. */
	cpu = smp_processor_id();
	table_base = private->entries;
	jumpstack = (struct ipt_entry **)private->jumpstack[cpu];

	/* Switch to alternate jumpstack if we're being invoked via TEE.
	 * TEE issues XT_CONTINUE verdict on original skb so we must not
	 * clobber the jumpstack.
	 *
	 * For recursion via REJECT or SYNPROXY the stack will be clobbered
	 * but it is no problem since absolute verdict is issued by these.
	 */
	if (static_key_false(&xt_tee_enabled))
		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);

	e = get_entry(table_base, private->hook_entry[hook]);

	do {
		const struct xt_entry_target *t;
		const struct xt_entry_match *ematch;
		struct xt_counters *counter;

		WARN_ON(!e);
		if (!ip_packet_match(ip, indev, outdev,
		    &e->ip, acpar.fragoff)) {
 no_match:
			e = ipt_next_entry(e);
			continue;
		}

		/* All extension matches must also accept the packet. */
		xt_ematch_foreach(ematch, e) {
			acpar.match     = ematch->u.kernel.match;
			acpar.matchinfo = ematch->data;
			if (!acpar.match->match(skb, &acpar))
				goto no_match;
		}

		counter = xt_get_this_cpu_counter(&e->counters);
		ADD_COUNTER(*counter, skb->len, 1);

		t = ipt_get_target(e);
		WARN_ON(!t->u.kernel.target);

#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
		/* The packet is traced: log it */
		if (unlikely(skb->nf_trace))
			trace_packet(state->net, skb, hook, state->in,
				     state->out, table->name, private, e);
#endif
		/* Standard target? */
		if (!t->u.kernel.target->target) {
			int v;

			v = ((struct xt_standard_target *)t)->verdict;
			if (v < 0) {
				/* Pop from stack? */
				if (v != XT_RETURN) {
					/* Absolute verdict (negative encoding). */
					verdict = (unsigned int)(-v) - 1;
					break;
				}
				if (stackidx == 0) {
					/* RETURN from a base chain: use its policy. */
					e = get_entry(table_base,
					    private->underflow[hook]);
				} else {
					e = jumpstack[--stackidx];
					e = ipt_next_entry(e);
				}
				continue;
			}
			/* Push a return address unless this is a tail call/GOTO. */
			if (table_base + v != ipt_next_entry(e) &&
			    !(e->ip.flags & IPT_F_GOTO)) {
				if (unlikely(stackidx >= private->stacksize)) {
					verdict = NF_DROP;
					break;
				}
				jumpstack[stackidx++] = e;
			}

			e = get_entry(table_base, v);
			continue;
		}

		acpar.target   = t->u.kernel.target;
		acpar.targinfo = t->data;

		verdict = t->u.kernel.target->target(skb, &acpar);
		if (verdict == XT_CONTINUE) {
			/* Target might have changed stuff. */
			ip = ip_hdr(skb);
			e = ipt_next_entry(e);
		} else {
			/* Verdict */
			break;
		}
	} while (!acpar.hotdrop);

	xt_write_recseq_end(addend);
	local_bh_enable();

	if (acpar.hotdrop)
		return NF_DROP;
	else return verdict;
}
/* Figures out from what hook each rule can be called: returns 0 if
there are loops. Puts hook bitmask in comefrom. */
static int
mark_source_chains(const struct xt_table_info *newinfo,
		   unsigned int valid_hooks, void *entry0,
		   unsigned int *offsets)
{
	unsigned int hook;

	/* No recursion; use packet counter to save back ptrs (reset
	   to 0 as we leave), and comefrom to save source hook bitmask */
	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
		unsigned int pos = newinfo->hook_entry[hook];
		struct ipt_entry *e = entry0 + pos;

		if (!(valid_hooks & (1 << hook)))
			continue;

		/* Set initial back pointer. */
		e->counters.pcnt = pos;

		for (;;) {
			const struct xt_standard_target *t
				= (void *)ipt_get_target_c(e);
			int visited = e->comefrom & (1 << hook);

			/* Seeing our in-progress marker again == loop. */
			if (e->comefrom & (1 << NF_INET_NUMHOOKS))
				return 0;

			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));

			/* Unconditional return/END. */
			if ((unconditional(e) &&
			     (strcmp(t->target.u.user.name,
				     XT_STANDARD_TARGET) == 0) &&
			     t->verdict < 0) || visited) {
				unsigned int oldpos, size;

				/* Reject verdicts outside the legal range. */
				if ((strcmp(t->target.u.user.name,
					    XT_STANDARD_TARGET) == 0) &&
				    t->verdict < -NF_MAX_VERDICT - 1)
					return 0;

				/* Return: backtrack through the last
				   big jump. */
				do {
					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
					oldpos = pos;
					pos = e->counters.pcnt;
					e->counters.pcnt = 0;

					/* We're at the start. */
					if (pos == oldpos)
						goto next;

					e = entry0 + pos;
				} while (oldpos == pos + e->next_offset);

				/* Move along one */
				size = e->next_offset;
				e = entry0 + pos + size;
				if (pos + size >= newinfo->size)
					return 0;
				e->counters.pcnt = pos;
				pos += size;
			} else {
				int newpos = t->verdict;

				if (strcmp(t->target.u.user.name,
					   XT_STANDARD_TARGET) == 0 &&
				    newpos >= 0) {
					/* This a jump; chase it. */
					if (!xt_find_jump_offset(offsets, newpos,
								 newinfo->number))
						return 0;
				} else {
					/* ... this is a fallthru */
					newpos = pos + e->next_offset;
					if (newpos >= newinfo->size)
						return 0;
				}
				e = entry0 + newpos;
				e->counters.pcnt = pos;
				pos = newpos;
			}
		}
next:		;
	}
	return 1;
}
static void cleanup_match(struct xt_entry_match *m, struct net *net)
{
struct xt_mtdtor_param par;
par.net = net;
par.match = m->u.kernel.match;
par.matchinfo = m->data;
par.family = NFPROTO_IPV4;
if (par.match->destroy != NULL)
par.match->destroy(&par);
module_put(par.match->me);
}
/* Fill in the match fields and let xt_check_match() validate them. */
static int
check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;

	par->match     = m->u.kernel.match;
	par->matchinfo = m->data;

	return xt_check_match(par, m->u.match_size - sizeof(*m),
			      ip->proto, ip->invflags & IPT_INV_PROTO);
}
static int
find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
{
struct xt_match *match;
int ret;
match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
m->u.user.revision);
if (IS_ERR(match))
return PTR_ERR(match);
m->u.kernel.match = match;
ret = check_match(m, par);
if (ret)
goto err;
return 0;
err:
module_put(m->u.kernel.match->me);
return ret;
}
/* Build the check-param for this entry's target and validate it. */
static int check_target(struct ipt_entry *e, struct net *net, const char *name)
{
	struct xt_entry_target *t = ipt_get_target(e);
	struct xt_tgchk_param par = {
		.net       = net,
		.table     = name,
		.entryinfo = e,
		.target    = t->u.kernel.target,
		.targinfo  = t->data,
		.hook_mask = e->comefrom,
		.family    = NFPROTO_IPV4,
	};

	return xt_check_target(&par, t->u.target_size - sizeof(*t),
			       e->ip.proto, e->ip.invflags & IPT_INV_PROTO);
}
/*
 * Fully validate one entry: allocate its per-cpu counter, resolve and
 * check every match, then resolve and check the target.  On any
 * failure, everything acquired so far is unwound.
 */
static int
find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
		 unsigned int size,
		 struct xt_percpu_counter_alloc_state *alloc_state)
{
	struct xt_entry_target *t;
	struct xt_target *target;
	int ret;
	unsigned int j;
	struct xt_mtchk_param mtpar;
	struct xt_entry_match *ematch;

	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
		return -ENOMEM;

	/* j counts checked matches so cleanup undoes only those. */
	j = 0;
	mtpar.net	= net;
	mtpar.table     = name;
	mtpar.entryinfo = &e->ip;
	mtpar.hook_mask = e->comefrom;
	mtpar.family    = NFPROTO_IPV4;
	xt_ematch_foreach(ematch, e) {
		ret = find_check_match(ematch, &mtpar);
		if (ret != 0)
			goto cleanup_matches;
		++j;
	}

	t = ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto cleanup_matches;
	}
	t->u.kernel.target = target;

	ret = check_target(e, net, name);
	if (ret)
		goto err;
	return 0;
 err:
	module_put(t->u.kernel.target->me);
 cleanup_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		cleanup_match(ematch, net);
	}

	xt_percpu_counter_free(&e->counters);

	return ret;
}
/*
 * An underflow entry (base-chain policy) must be an unconditional
 * STANDARD target whose verdict decodes to NF_DROP or NF_ACCEPT.
 */
static bool check_underflow(const struct ipt_entry *e)
{
	const struct xt_entry_target *t;
	unsigned int verdict;

	if (!unconditional(e))
		return false;

	t = ipt_get_target_c(e);
	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
		return false;

	/* Decode the negative verdict encoding: -v - 1. */
	verdict = ((struct xt_standard_target *)t)->verdict;
	verdict = -verdict - 1;
	return verdict == NF_DROP || verdict == NF_ACCEPT;
}
/*
 * Validate the size, alignment and offsets of one user-supplied entry,
 * and record any hook entry/underflow positions it lands on.  These
 * bounds checks keep a malicious blob from walking outside the table.
 */
static int
check_entry_size_and_hooks(struct ipt_entry *e,
			   struct xt_table_info *newinfo,
			   const unsigned char *base,
			   const unsigned char *limit,
			   const unsigned int *hook_entries,
			   const unsigned int *underflows,
			   unsigned int valid_hooks)
{
	unsigned int h;
	int err;

	/* Alignment, and both the header and the whole entry within bounds. */
	if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	/* Entry must at least hold its header plus a target header. */
	if (e->next_offset
	    < sizeof(struct ipt_entry) + sizeof(struct xt_entry_target))
		return -EINVAL;

	if (!ip_checkentry(&e->ip))
		return -EINVAL;

	err = xt_check_entry_offsets(e, e->elems, e->target_offset,
				     e->next_offset);
	if (err)
		return err;

	/* Check hooks & underflows */
	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if (!(valid_hooks & (1 << h)))
			continue;
		if ((unsigned char *)e - base == hook_entries[h])
			newinfo->hook_entry[h] = hook_entries[h];
		if ((unsigned char *)e - base == underflows[h]) {
			if (!check_underflow(e))
				return -EINVAL;

			newinfo->underflow[h] = underflows[h];
		}
	}

	/* Clear counters and comefrom */
	e->counters = ((struct xt_counters) { 0, 0 });
	e->comefrom = 0;
	return 0;
}
/*
 * Release everything a rule holds: match state/modules, the target
 * module (after running its destroy hook) and the per-cpu counters.
 */
static void
cleanup_entry(struct ipt_entry *e, struct net *net)
{
	struct xt_tgdtor_param par;
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		cleanup_match(ematch, net);
	t = ipt_get_target(e);

	par.net      = net;
	par.target   = t->u.kernel.target;
	par.targinfo = t->data;
	par.family   = NFPROTO_IPV4;
	if (par.target->destroy != NULL)
		par.target->destroy(&par);
	module_put(par.target->me);
	xt_percpu_counter_free(&e->counters);
}
/* Checks and translates the user-supplied table segment (held in
 * newinfo): walk all entries once for size/offset/hook validation,
 * verify jump targets via mark_source_chains(), then run every
 * match/target checkentry hook; on checkentry failure the already
 * validated entries are cleaned up again. */
static int
translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
		const struct ipt_replace *repl)
{
	struct xt_percpu_counter_alloc_state alloc_state = { 0 };
	struct ipt_entry *iter;
	unsigned int *offsets;
	unsigned int i;
	int ret = 0;

	newinfo->size = repl->size;
	newinfo->number = repl->num_entries;

	/* Init all hooks to impossible value. */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = 0xFFFFFFFF;
		newinfo->underflow[i] = 0xFFFFFFFF;
	}

	/* offsets[] records each rule's byte offset so that
	 * mark_source_chains() can validate jump destinations */
	offsets = xt_alloc_entry_offsets(newinfo->number);
	if (!offsets)
		return -ENOMEM;
	i = 0;
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
						 entry0 + repl->size,
						 repl->hook_entry,
						 repl->underflow,
						 repl->valid_hooks);
		if (ret != 0)
			goto out_free;
		if (i < repl->num_entries)
			offsets[i] = (void *)iter - entry0;
		++i;
		if (strcmp(ipt_get_target(iter)->u.user.name,
		    XT_ERROR_TARGET) == 0)
			++newinfo->stacksize;
	}

	ret = -EINVAL;
	if (i != repl->num_entries)
		goto out_free;

	/* Check hooks all assigned */
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* Only hooks which are valid */
		if (!(repl->valid_hooks & (1 << i)))
			continue;
		if (newinfo->hook_entry[i] == 0xFFFFFFFF)
			goto out_free;
		if (newinfo->underflow[i] == 0xFFFFFFFF)
			goto out_free;
	}

	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
		ret = -ELOOP;
		goto out_free;
	}
	kvfree(offsets);

	/* Finally, each sanity check must pass */
	i = 0;
	xt_entry_foreach(iter, entry0, newinfo->size) {
		ret = find_check_entry(iter, net, repl->name, repl->size,
				       &alloc_state);
		if (ret != 0)
			break;
		++i;
	}

	if (ret != 0) {
		/* unwind only the i entries that passed find_check_entry() */
		xt_entry_foreach(iter, entry0, newinfo->size) {
			if (i-- == 0)
				break;
			cleanup_entry(iter, net);
		}
		return ret;
	}

	return ret;
 out_free:
	kvfree(offsets);
	return ret;
}
/*
 * Sum the per-cpu packet/byte counters of every rule into counters[].
 * Each read is guarded by the per-cpu xt_recseq seqcount so a counter
 * update racing on another cpu is retried instead of being read torn.
 */
static void
get_counters(const struct xt_table_info *t,
	     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu;
	unsigned int i;

	for_each_possible_cpu(cpu) {
		seqcount_t *s = &per_cpu(xt_recseq, cpu);

		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			struct xt_counters *tmp;
			u64 bcnt, pcnt;
			unsigned int start;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			do {
				start = read_seqcount_begin(s);
				bcnt = tmp->bcnt;
				pcnt = tmp->pcnt;
			} while (read_seqcount_retry(s, start));

			ADD_COUNTER(counters[i], bcnt, pcnt);
			++i; /* macro does multi eval of i */
			cond_resched();
		}
	}
}
/*
 * Like get_counters(), but without the seqcount retry loop — used on a
 * table that has already been replaced, so no packet path updates it.
 */
static void get_old_counters(const struct xt_table_info *t,
			     struct xt_counters counters[])
{
	struct ipt_entry *iter;
	unsigned int cpu, i;

	for_each_possible_cpu(cpu) {
		i = 0;
		xt_entry_foreach(iter, t->entries, t->size) {
			const struct xt_counters *tmp;

			tmp = xt_get_per_cpu_counter(&iter->counters, cpu);
			ADD_COUNTER(counters[i], tmp->bcnt, tmp->pcnt);
			++i; /* macro does multi eval of i */
		}
		cond_resched();
	}
}
/*
 * Snapshot all rule counters of the table into a freshly vzalloc'ed
 * array.  The caller must vfree() the result; on allocation failure
 * ERR_PTR(-ENOMEM) is returned instead.
 */
static struct xt_counters *alloc_counters(const struct xt_table *table)
{
	const struct xt_table_info *private = table->private;
	struct xt_counters *snapshot;

	/* We need atomic snapshot of counters: rest doesn't change
	 * (other than comefrom, which userspace doesn't care about).
	 */
	snapshot = vzalloc(sizeof(struct xt_counters) * private->number);
	if (snapshot == NULL)
		return ERR_PTR(-ENOMEM);

	get_counters(private, snapshot);

	return snapshot;
}
/*
 * Copy the whole ruleset blob to userspace, patching in the current
 * counter snapshot and converting each match/target record via the
 * xt_*_to_user helpers.
 */
static int
copy_entries_to_user(unsigned int total_size,
		     const struct xt_table *table,
		     void __user *userptr)
{
	unsigned int off, num;
	const struct ipt_entry *e;
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	int ret = 0;
	const void *loc_cpu_entry;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	loc_cpu_entry = private->entries;

	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */
	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
		unsigned int i;
		const struct xt_entry_match *m;
		const struct xt_entry_target *t;

		e = loc_cpu_entry + off;
		/* copy the fixed header first ... */
		if (copy_to_user(userptr + off, e, sizeof(*e))) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* ... then overwrite its counters with the snapshot */
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],
				 sizeof(counters[num])) != 0) {
			ret = -EFAULT;
			goto free_counters;
		}

		/* copy each match record between header and target */
		for (i = sizeof(struct ipt_entry);
		     i < e->target_offset;
		     i += m->u.match_size) {
			m = (void *)e + i;

			if (xt_match_to_user(m, userptr + off + i)) {
				ret = -EFAULT;
				goto free_counters;
			}
		}

		t = ipt_get_target_c(e);
		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
			ret = -EFAULT;
			goto free_counters;
		}
	}

 free_counters:
	vfree(counters);
	return ret;
}
#ifdef CONFIG_COMPAT
/*
 * Translate a standard-target verdict from compat to native layout;
 * positive verdicts are jump offsets and must be shifted by the
 * accumulated compat size delta.
 */
static void compat_standard_from_user(void *dst, const void *src)
{
	int verdict = *(compat_int_t *)src;

	if (verdict > 0)
		verdict += xt_compat_calc_jump(AF_INET, verdict);
	memcpy(dst, &verdict, sizeof(verdict));
}
/*
 * Translate a standard-target verdict from native to compat layout;
 * positive verdicts (jump offsets) are shrunk by the compat delta.
 */
static int compat_standard_to_user(void __user *dst, const void *src)
{
	compat_int_t verdict = *(int *)src;

	if (verdict > 0)
		verdict -= xt_compat_calc_jump(AF_INET, verdict);
	if (copy_to_user(dst, &verdict, sizeof(verdict)))
		return -EFAULT;
	return 0;
}
/*
 * Compute how many bytes entry e shrinks by in compat layout, record
 * that per-entry delta for later offset translation, and pull the
 * compat hook/underflow offsets in newinfo back accordingly.
 */
static int compat_calc_entry(const struct ipt_entry *e,
			     const struct xt_table_info *info,
			     const void *base, struct xt_table_info *newinfo)
{
	const struct xt_entry_match *ematch;
	const struct xt_entry_target *t;
	unsigned int entry_offset;
	int off, i, ret;

	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - base;
	xt_ematch_foreach(ematch, e)
		off += xt_compat_match_offset(ematch->u.kernel.match);
	t = ipt_get_target_c(e);
	off += xt_compat_target_offset(t->u.kernel.target);
	newinfo->size -= off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		/* entries located before a hook/underflow shift it back */
		if (info->hook_entry[i] &&
		    (e < (struct ipt_entry *)(base + info->hook_entry[i])))
			newinfo->hook_entry[i] -= off;
		if (info->underflow[i] &&
		    (e < (struct ipt_entry *)(base + info->underflow[i])))
			newinfo->underflow[i] -= off;
	}
	return 0;
}
/*
 * Build a compat-layout view of info (sizes, hook offsets) in newinfo
 * by running compat_calc_entry() over every rule.
 */
static int compat_table_info(const struct xt_table_info *info,
			     struct xt_table_info *newinfo)
{
	struct ipt_entry *iter;
	const void *loc_cpu_entry;
	int ret;

	if (!newinfo || !info)
		return -EINVAL;

	/* we dont care about newinfo->entries */
	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
	newinfo->initial_entries = 0;
	loc_cpu_entry = info->entries;
	xt_compat_init_offsets(AF_INET, info->number);
	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
		if (ret != 0)
			return ret;
	}
	return 0;
}
#endif
/*
 * IPT_SO_GET_INFO: copy table metadata (valid hooks, hook entry points,
 * underflows, entry count, blob size) to userspace.  For compat callers
 * the sizes/offsets are first recomputed in compat layout under the
 * compat lock.
 */
static int get_info(struct net *net, void __user *user,
		    const int *len, int compat)
{
	char name[XT_TABLE_MAXNAMELEN];
	struct xt_table *t;
	int ret;

	if (*len != sizeof(struct ipt_getinfo))
		return -EINVAL;

	if (copy_from_user(name, user, sizeof(name)) != 0)
		return -EFAULT;

	name[XT_TABLE_MAXNAMELEN-1] = '\0';	/* force NUL termination */
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_lock(AF_INET);
#endif
	/* may auto-load the table module */
	t = xt_request_find_table_lock(net, AF_INET, name);
	if (!IS_ERR(t)) {
		struct ipt_getinfo info;
		const struct xt_table_info *private = t->private;
#ifdef CONFIG_COMPAT
		struct xt_table_info tmp;

		if (compat) {
			ret = compat_table_info(private, &tmp);
			xt_compat_flush_offsets(AF_INET);
			private = &tmp;
		}
#endif
		memset(&info, 0, sizeof(info));
		info.valid_hooks = t->valid_hooks;
		memcpy(info.hook_entry, private->hook_entry,
		       sizeof(info.hook_entry));
		memcpy(info.underflow, private->underflow,
		       sizeof(info.underflow));
		info.num_entries = private->number;
		info.size = private->size;
		strcpy(info.name, name);

		if (copy_to_user(user, &info, *len) != 0)
			ret = -EFAULT;
		else
			ret = 0;

		xt_table_unlock(t);
		module_put(t->me);
	} else
		ret = PTR_ERR(t);
#ifdef CONFIG_COMPAT
	if (compat)
		xt_compat_unlock(AF_INET);
#endif
	return ret;
}
/*
 * IPT_SO_GET_ENTRIES: copy the entire ruleset blob (with a fresh
 * counter snapshot) to userspace.  The caller-announced get.size must
 * match the live table size exactly, otherwise -EAGAIN tells userspace
 * to re-fetch the info first.
 */
static int
get_entries(struct net *net, struct ipt_get_entries __user *uptr,
	    const int *len)
{
	int ret;
	struct ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;
	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;
	if (*len != sizeof(struct ipt_get_entries) + get.size)
		return -EINVAL;
	get.name[sizeof(get.name) - 1] = '\0';	/* force NUL termination */

	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;

		if (get.size == private->size)
			ret = copy_entries_to_user(private->size,
						   t, uptr->entrytable);
		else
			ret = -EAGAIN;

		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = PTR_ERR(t);

	return ret;
}
/*
 * Swap the named table's ruleset for newinfo under the xt table lock,
 * copy the old ruleset's final counters to userspace, and free the old
 * ruleset.  On success ownership of newinfo passes to the table.
 */
static int
__do_replace(struct net *net, const char *name, unsigned int valid_hooks,
	     struct xt_table_info *newinfo, unsigned int num_counters,
	     void __user *counters_ptr)
{
	int ret;
	struct xt_table *t;
	struct xt_table_info *oldinfo;
	struct xt_counters *counters;
	struct ipt_entry *iter;

	ret = 0;
	counters = vzalloc(num_counters * sizeof(struct xt_counters));
	if (!counters) {
		ret = -ENOMEM;
		goto out;
	}

	t = xt_request_find_table_lock(net, AF_INET, name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free_newinfo_counters_untrans;
	}

	/* You lied! */
	if (valid_hooks != t->valid_hooks) {
		ret = -EINVAL;
		goto put_module;
	}

	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
	if (!oldinfo)
		goto put_module;

	/* Update module usage count based on number of rules: a table
	 * with user-added rules (number > initial_entries) pins the
	 * table module (cf. __ipt_unregister_table); drop one reference
	 * here, and a second one when both old and new state no longer
	 * need the pin. */
	if ((oldinfo->number > oldinfo->initial_entries) ||
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);
	if ((oldinfo->number > oldinfo->initial_entries) &&
	    (newinfo->number <= oldinfo->initial_entries))
		module_put(t->me);

	/* old table is now unhooked; read its counters without retries */
	get_old_counters(oldinfo, counters);

	/* Decrease module usage counts and free resource */
	xt_entry_foreach(iter, oldinfo->entries, oldinfo->size)
		cleanup_entry(iter, net);

	xt_free_table_info(oldinfo);
	if (copy_to_user(counters_ptr, counters,
			 sizeof(struct xt_counters) * num_counters) != 0) {
		/* Silent error, can't fail, new table is already in place */
		net_warn_ratelimited("iptables: counters copy to user failed while replacing table\n");
	}
	vfree(counters);
	xt_table_unlock(t);
	return ret;

 put_module:
	module_put(t->me);
	xt_table_unlock(t);
 free_newinfo_counters_untrans:
	vfree(counters);
 out:
	return ret;
}
/*
 * IPT_SO_SET_REPLACE (native layout): copy the replacement blob from
 * userspace, validate/translate it, then install it via __do_replace.
 */
static int
do_replace(struct net *net, const void __user *user, unsigned int len)
{
	int ret;
	struct ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;	/* force NUL termination */

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, tmp.counters);
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	/* drop the match/target references taken by translate_table() */
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/*
 * IPT_SO_SET_ADD_COUNTERS: add user-supplied byte/packet deltas to the
 * current cpu's counters of every rule in the named table, under the
 * xt_write_recseq write section with BHs disabled.
 */
static int
do_add_counters(struct net *net, const void __user *user,
		unsigned int len, int compat)
{
	unsigned int i;
	struct xt_counters_info tmp;
	struct xt_counters *paddc;
	struct xt_table *t;
	const struct xt_table_info *private;
	int ret = 0;
	struct ipt_entry *iter;
	unsigned int addend;

	/* copies and validates the user counter blob (handles compat) */
	paddc = xt_copy_counters_from_user(user, len, &tmp, compat);
	if (IS_ERR(paddc))
		return PTR_ERR(paddc);

	t = xt_find_table_lock(net, AF_INET, tmp.name);
	if (IS_ERR(t)) {
		ret = PTR_ERR(t);
		goto free;
	}

	local_bh_disable();
	private = t->private;
	if (private->number != tmp.num_counters) {
		ret = -EINVAL;
		goto unlock_up_free;
	}

	i = 0;
	addend = xt_write_recseq_begin();
	xt_entry_foreach(iter, private->entries, private->size) {
		struct xt_counters *tmp;	/* shadows outer tmp (counters_info) */

		tmp = xt_get_this_cpu_counter(&iter->counters);
		ADD_COUNTER(*tmp, paddc[i].bcnt, paddc[i].pcnt);
		++i;
	}
	xt_write_recseq_end(addend);
 unlock_up_free:
	local_bh_enable();
	xt_table_unlock(t);
	module_put(t->me);
 free:
	vfree(paddc);

	return ret;
}
#ifdef CONFIG_COMPAT
/* 32bit-userland layout of struct ipt_replace: pointer-sized fields
 * are compat_uptr_t and the trailing entries use compat layout. */
struct compat_ipt_replace {
	char			name[XT_TABLE_MAXNAMELEN];
	u32			valid_hooks;
	u32			num_entries;
	u32			size;
	u32			hook_entry[NF_INET_NUMHOOKS];
	u32			underflow[NF_INET_NUMHOOKS];
	u32			num_counters;
	compat_uptr_t		counters;	/* struct xt_counters * */
	struct compat_ipt_entry entries[0];
};
/*
 * Copy one native entry to userspace in compat layout, patching in the
 * counter snapshot and rewriting target/next offsets by the size that
 * was saved during the match/target conversions.
 */
static int
compat_copy_entry_to_user(struct ipt_entry *e, void __user **dstptr,
			  unsigned int *size, struct xt_counters *counters,
			  unsigned int i)
{
	struct xt_entry_target *t;
	struct compat_ipt_entry __user *ce;
	u_int16_t target_offset, next_offset;
	compat_uint_t origsize;
	const struct xt_entry_match *ematch;
	int ret = 0;

	origsize = *size;
	ce = *dstptr;
	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
	    copy_to_user(&ce->counters, &counters[i],
	    sizeof(counters[i])) != 0)
		return -EFAULT;

	*dstptr += sizeof(struct compat_ipt_entry);
	*size -= sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e) {
		ret = xt_compat_match_to_user(ematch, dstptr, size);
		if (ret != 0)
			return ret;
	}
	/* (origsize - *size) is the total shrinkage so far */
	target_offset = e->target_offset - (origsize - *size);
	t = ipt_get_target(e);
	ret = xt_compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);
	if (put_user(target_offset, &ce->target_offset) != 0 ||
	    put_user(next_offset, &ce->next_offset) != 0)
		return -EFAULT;
	return 0;
}
/*
 * Resolve a compat match by name/revision (pinning its module) and add
 * its compat->native size delta to *size.
 */
static int
compat_find_calc_match(struct xt_entry_match *m,
		       const struct ipt_ip *ip,
		       int *size)
{
	struct xt_match *match;

	match = xt_request_find_match(NFPROTO_IPV4, m->u.user.name,
				      m->u.user.revision);
	if (IS_ERR(match))
		return PTR_ERR(match);

	m->u.kernel.match = match;
	*size += xt_compat_match_offset(match);
	return 0;
}
/* Drop the match and target module references taken while a compat
 * entry was being checked. */
static void compat_release_entry(struct compat_ipt_entry *e)
{
	struct xt_entry_target *t;
	struct xt_entry_match *ematch;

	/* Cleanup all matches */
	xt_ematch_foreach(ematch, e)
		module_put(ematch->u.kernel.match->me);
	t = compat_ipt_get_target(e);
	module_put(t->u.kernel.target->me);
}
/*
 * First-pass validation of one compat entry: alignment/bounds/offset
 * checks, resolve every match and the target (pinning their modules),
 * and record the entry's compat->native size delta.
 */
static int
check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
				  struct xt_table_info *newinfo,
				  unsigned int *size,
				  const unsigned char *base,
				  const unsigned char *limit)
{
	struct xt_entry_match *ematch;
	struct xt_entry_target *t;
	struct xt_target *target;
	unsigned int entry_offset;
	unsigned int j;
	int ret, off;

	/* entry must be aligned and lie fully inside the compat blob */
	if ((unsigned long)e % __alignof__(struct compat_ipt_entry) != 0 ||
	    (unsigned char *)e + sizeof(struct compat_ipt_entry) >= limit ||
	    (unsigned char *)e + e->next_offset > limit)
		return -EINVAL;

	if (e->next_offset < sizeof(struct compat_ipt_entry) +
			     sizeof(struct compat_xt_entry_target))
		return -EINVAL;

	if (!ip_checkentry(&e->ip))
		return -EINVAL;

	ret = xt_compat_check_entry_offsets(e, e->elems,
					    e->target_offset, e->next_offset);
	if (ret)
		return ret;

	/* accumulate how much this entry grows in native layout */
	off = sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);
	entry_offset = (void *)e - (void *)base;
	j = 0;		/* matches resolved so far (for unwinding) */
	xt_ematch_foreach(ematch, e) {
		ret = compat_find_calc_match(ematch, &e->ip, &off);
		if (ret != 0)
			goto release_matches;
		++j;
	}

	t = compat_ipt_get_target(e);
	target = xt_request_find_target(NFPROTO_IPV4, t->u.user.name,
					t->u.user.revision);
	if (IS_ERR(target)) {
		ret = PTR_ERR(target);
		goto release_matches;
	}
	t->u.kernel.target = target;

	off += xt_compat_target_offset(target);
	*size += off;
	ret = xt_compat_add_offset(AF_INET, entry_offset, off);
	if (ret)
		goto out;

	return 0;

 out:
	module_put(t->u.kernel.target->me);
 release_matches:
	xt_ematch_foreach(ematch, e) {
		if (j-- == 0)
			break;
		module_put(ematch->u.kernel.match->me);
	}
	return ret;
}
/*
 * Second pass: expand one already-validated compat entry into native
 * layout at *dstptr, fixing up target/next offsets and shifting the
 * hook entry/underflow offsets that lie beyond this entry.  Cannot
 * fail because the first pass checked everything.
 */
static void
compat_copy_entry_from_user(struct compat_ipt_entry *e, void **dstptr,
			    unsigned int *size,
			    struct xt_table_info *newinfo, unsigned char *base)
{
	struct xt_entry_target *t;
	struct ipt_entry *de;
	unsigned int origsize;
	int h;
	struct xt_entry_match *ematch;

	origsize = *size;
	de = *dstptr;
	memcpy(de, e, sizeof(struct ipt_entry));
	memcpy(&de->counters, &e->counters, sizeof(e->counters));

	*dstptr += sizeof(struct ipt_entry);
	*size += sizeof(struct ipt_entry) - sizeof(struct compat_ipt_entry);

	xt_ematch_foreach(ematch, e)
		xt_compat_match_from_user(ematch, dstptr, size);

	/* (origsize - *size) is negative growth: offsets move forward */
	de->target_offset = e->target_offset - (origsize - *size);
	t = compat_ipt_get_target(e);
	xt_compat_target_from_user(t, dstptr, size);

	de->next_offset = e->next_offset - (origsize - *size);

	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
		if ((unsigned char *)de - base < newinfo->hook_entry[h])
			newinfo->hook_entry[h] -= origsize - *size;
		if ((unsigned char *)de - base < newinfo->underflow[h])
			newinfo->underflow[h] -= origsize - *size;
	}
}
/*
 * Convert a 32bit-userland (compat) ruleset into native layout: pass 1
 * validates each compat entry and computes the grown size, pass 2
 * copies entries into a freshly allocated native blob, and finally the
 * regular translate_table() revalidates the native result (hooks,
 * underflows, loops, checkentry).  On success *pinfo/*pentry0 are
 * replaced and the old info is freed.
 */
static int
translate_compat_table(struct net *net,
		       struct xt_table_info **pinfo,
		       void **pentry0,
		       const struct compat_ipt_replace *compatr)
{
	unsigned int i, j;
	struct xt_table_info *newinfo, *info;
	void *pos, *entry0, *entry1;
	struct compat_ipt_entry *iter0;
	struct ipt_replace repl;
	unsigned int size;
	int ret;

	info = *pinfo;
	entry0 = *pentry0;
	size = compatr->size;
	info->number = compatr->num_entries;

	j = 0;		/* entries validated so far (for unwinding) */
	xt_compat_lock(AF_INET);
	xt_compat_init_offsets(AF_INET, compatr->num_entries);
	/* Walk through entries, checking offsets. */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
							entry0,
							entry0 + compatr->size);
		if (ret != 0)
			goto out_unlock;
		++j;
	}

	ret = -EINVAL;
	if (j != compatr->num_entries)
		goto out_unlock;

	ret = -ENOMEM;
	newinfo = xt_alloc_table_info(size);
	if (!newinfo)
		goto out_unlock;

	newinfo->number = compatr->num_entries;
	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		newinfo->hook_entry[i] = compatr->hook_entry[i];
		newinfo->underflow[i] = compatr->underflow[i];
	}
	entry1 = newinfo->entries;
	pos = entry1;
	size = compatr->size;
	xt_entry_foreach(iter0, entry0, compatr->size)
		compat_copy_entry_from_user(iter0, &pos, &size,
					    newinfo, entry1);

	/* all module references in entry0 are now gone.
	 * entry1/newinfo contains a 64bit ruleset that looks exactly as
	 * generated by 64bit userspace.
	 *
	 * Call standard translate_table() to validate all hook_entrys,
	 * underflows, check for loops, etc.
	 */
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);

	memcpy(&repl, compatr, sizeof(*compatr));

	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
		repl.hook_entry[i] = newinfo->hook_entry[i];
		repl.underflow[i] = newinfo->underflow[i];
	}

	repl.num_counters = 0;
	repl.counters = NULL;
	repl.size = newinfo->size;
	ret = translate_table(net, newinfo, entry1, &repl);
	if (ret)
		goto free_newinfo;

	*pinfo = newinfo;
	*pentry0 = entry1;
	xt_free_table_info(info);
	return 0;

 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
 out_unlock:
	xt_compat_flush_offsets(AF_INET);
	xt_compat_unlock(AF_INET);
	/* release only the j entries that passed validation */
	xt_entry_foreach(iter0, entry0, compatr->size) {
		if (j-- == 0)
			break;
		compat_release_entry(iter0);
	}
	return ret;
}
/*
 * IPT_SO_SET_REPLACE (compat layout): copy the 32bit replacement blob
 * from userspace, translate it to native layout, then install it via
 * __do_replace.
 */
static int
compat_do_replace(struct net *net, void __user *user, unsigned int len)
{
	int ret;
	struct compat_ipt_replace tmp;
	struct xt_table_info *newinfo;
	void *loc_cpu_entry;
	struct ipt_entry *iter;

	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
		return -EFAULT;

	/* overflow check */
	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
		return -ENOMEM;
	if (tmp.num_counters == 0)
		return -EINVAL;

	tmp.name[sizeof(tmp.name)-1] = 0;	/* force NUL termination */

	newinfo = xt_alloc_table_info(tmp.size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
			   tmp.size) != 0) {
		ret = -EFAULT;
		goto free_newinfo;
	}

	/* on success newinfo/loc_cpu_entry are replaced by native blobs */
	ret = translate_compat_table(net, &newinfo, &loc_cpu_entry, &tmp);
	if (ret != 0)
		goto free_newinfo;

	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
			   tmp.num_counters, compat_ptr(tmp.counters));
	if (ret)
		goto free_newinfo_untrans;
	return 0;

 free_newinfo_untrans:
	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
		cleanup_entry(iter, net);
 free_newinfo:
	xt_free_table_info(newinfo);
	return ret;
}
/* Compat setsockopt entry point; requires CAP_NET_ADMIN in the
 * socket's user namespace. */
static int
compat_do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user,
		      unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = compat_do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		/* counters path shares the native handler (compat=1) */
		ret = do_add_counters(sock_net(sk), user, len, 1);
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}
/* 32bit-userland layout of struct ipt_get_entries. */
struct compat_ipt_get_entries {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t size;
	struct compat_ipt_entry entrytable[0];
};
/*
 * Copy the whole ruleset (with a counter snapshot) to a compat-layout
 * userspace buffer, one entry at a time.
 */
static int
compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
			    void __user *userptr)
{
	struct xt_counters *counters;
	const struct xt_table_info *private = table->private;
	void __user *pos;
	unsigned int size;
	int ret = 0;
	unsigned int i = 0;
	struct ipt_entry *iter;

	counters = alloc_counters(table);
	if (IS_ERR(counters))
		return PTR_ERR(counters);

	pos = userptr;
	size = total_size;
	xt_entry_foreach(iter, private->entries, total_size) {
		ret = compat_copy_entry_to_user(iter, &pos,
						&size, counters, i++);
		if (ret != 0)
			break;
	}

	vfree(counters);
	return ret;
}
/*
 * Compat IPT_SO_GET_ENTRIES: recompute the table's compat size under
 * the compat lock and, if the caller announced exactly that size, copy
 * the ruleset out in compat layout; otherwise -EAGAIN.
 */
static int
compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
		   int *len)
{
	int ret;
	struct compat_ipt_get_entries get;
	struct xt_table *t;

	if (*len < sizeof(get))
		return -EINVAL;

	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
		return -EFAULT;

	if (*len != sizeof(struct compat_ipt_get_entries) + get.size)
		return -EINVAL;

	get.name[sizeof(get.name) - 1] = '\0';	/* force NUL termination */

	xt_compat_lock(AF_INET);
	t = xt_find_table_lock(net, AF_INET, get.name);
	if (!IS_ERR(t)) {
		const struct xt_table_info *private = t->private;
		struct xt_table_info info;

		ret = compat_table_info(private, &info);
		if (!ret && get.size == info.size)
			ret = compat_copy_entries_to_user(private->size,
							  t, uptr->entrytable);
		else if (!ret)
			ret = -EAGAIN;

		xt_compat_flush_offsets(AF_INET);
		module_put(t->me);
		xt_table_unlock(t);
	} else
		ret = PTR_ERR(t);

	xt_compat_unlock(AF_INET);
	return ret;
}
/* forward declaration: the compat GET handler falls back to the native
 * handler for commands that need no compat-specific treatment */
static int do_ipt_get_ctl(struct sock *, int, void __user *, int *);

/* Compat getsockopt entry point; requires CAP_NET_ADMIN. */
static int
compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 1);
		break;
	case IPT_SO_GET_ENTRIES:
		ret = compat_get_entries(sock_net(sk), user, len);
		break;
	default:
		ret = do_ipt_get_ctl(sk, cmd, user, len);
	}
	return ret;
}
#endif
/* Native setsockopt entry point; requires CAP_NET_ADMIN in the
 * socket's user namespace. */
static int
do_ipt_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_SET_REPLACE:
		ret = do_replace(sock_net(sk), user, len);
		break;

	case IPT_SO_SET_ADD_COUNTERS:
		ret = do_add_counters(sock_net(sk), user, len, 0);
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}
/* Native getsockopt entry point: table info, entries, and match/target
 * revision queries.  Requires CAP_NET_ADMIN. */
static int
do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
	int ret;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IPT_SO_GET_INFO:
		ret = get_info(sock_net(sk), user, len, 0);
		break;

	case IPT_SO_GET_ENTRIES:
		ret = get_entries(sock_net(sk), user, len);
		break;

	case IPT_SO_GET_REVISION_MATCH:
	case IPT_SO_GET_REVISION_TARGET: {
		struct xt_get_revision rev;
		int target;	/* 1 = target lookup, 0 = match lookup */

		if (*len != sizeof(rev)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
			ret = -EFAULT;
			break;
		}
		rev.name[sizeof(rev.name)-1] = 0;	/* force NUL termination */

		if (cmd == IPT_SO_GET_REVISION_TARGET)
			target = 1;
		else
			target = 0;

		/* retry after loading the extension module if needed */
		try_then_request_module(xt_find_revision(AF_INET, rev.name,
							 rev.revision,
							 target, &ret),
					"ipt_%s", rev.name);
		break;
	}

	default:
		ret = -EINVAL;
	}

	return ret;
}
/*
 * Detach the table from xtables and free its ruleset; drops the extra
 * module reference held while the table contained user-added rules.
 */
static void __ipt_unregister_table(struct net *net, struct xt_table *table)
{
	struct xt_table_info *private;
	void *loc_cpu_entry;
	struct module *table_owner = table->me;
	struct ipt_entry *iter;

	private = xt_unregister_table(table);

	/* Decrease module usage counts and free resources */
	loc_cpu_entry = private->entries;
	xt_entry_foreach(iter, loc_cpu_entry, private->size)
		cleanup_entry(iter, net);
	if (private->number > private->initial_entries)
		module_put(table_owner);
	xt_free_table_info(private);
}
/*
 * Translate the initial ruleset in repl, register the table and hook it
 * into netfilter.  *res is published before nf_register_net_hooks() so
 * packets arriving right after hook registration see a valid table; it
 * is cleared again if hook registration fails.
 */
int ipt_register_table(struct net *net, const struct xt_table *table,
		       const struct ipt_replace *repl,
		       const struct nf_hook_ops *ops, struct xt_table **res)
{
	int ret;
	struct xt_table_info *newinfo;
	struct xt_table_info bootstrap = {0};
	void *loc_cpu_entry;
	struct xt_table *new_table;

	newinfo = xt_alloc_table_info(repl->size);
	if (!newinfo)
		return -ENOMEM;

	loc_cpu_entry = newinfo->entries;
	memcpy(loc_cpu_entry, repl->entries, repl->size);

	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
	if (ret != 0)
		goto out_free;

	new_table = xt_register_table(net, table, &bootstrap, newinfo);
	if (IS_ERR(new_table)) {
		ret = PTR_ERR(new_table);
		goto out_free;
	}

	/* set res now, will see skbs right after nf_register_net_hooks */
	WRITE_ONCE(*res, new_table);

	ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks));
	if (ret != 0) {
		__ipt_unregister_table(net, new_table);
		*res = NULL;
	}

	return ret;

 out_free:
	xt_free_table_info(newinfo);
	return ret;
}
/* Unhook the table from netfilter, then tear it down and free it. */
void ipt_unregister_table(struct net *net, struct xt_table *table,
			  const struct nf_hook_ops *ops)
{
	nf_unregister_net_hooks(net, ops, hweight32(table->valid_hooks));
	__ipt_unregister_table(net, table);
}
/* Returns 1 if the type and code is matched by the range, 0 otherwise */
static inline bool
icmp_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
		     u_int8_t type, u_int8_t code,
		     bool invert)
{
	bool hit;

	if (test_type == 0xFF) {
		/* 0xFF acts as the "any type/code" wildcard */
		hit = true;
	} else {
		hit = type == test_type &&
		      code >= min_code && code <= max_code;
	}
	/* invert flips the sense of the match */
	return hit ^ invert;
}
/*
 * "icmp" match: compare the packet's ICMP type/code against the range
 * configured in ipt_icmp.  Non-first fragments never match; a packet
 * whose ICMP header cannot be read is hot-dropped.
 */
static bool
icmp_match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct icmphdr *ic;
	struct icmphdr _icmph;
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must not be a fragment. */
	if (par->fragoff != 0)
		return false;

	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
	if (ic == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't.  Hence, no choice but to drop.
		 */
		par->hotdrop = true;
		return false;
	}

	return icmp_type_code_match(icmpinfo->type,
				    icmpinfo->code[0],
				    icmpinfo->code[1],
				    ic->type, ic->code,
				    !!(icmpinfo->invflags&IPT_ICMP_INV));
}
/* Reject rules that set inversion flags other than IPT_ICMP_INV. */
static int icmp_checkentry(const struct xt_mtchk_param *par)
{
	const struct ipt_icmp *icmpinfo = par->matchinfo;

	/* Must specify no unknown invflags */
	if (icmpinfo->invflags & ~IPT_ICMP_INV)
		return -EINVAL;
	return 0;
}
/* Built-in targets: the standard verdict target and the error target
 * (used as chain-name marker entries). */
static struct xt_target ipt_builtin_tg[] __read_mostly = {
	{
		.name             = XT_STANDARD_TARGET,
		.targetsize       = sizeof(int),	/* the verdict */
		.family           = NFPROTO_IPV4,
#ifdef CONFIG_COMPAT
		.compatsize       = sizeof(compat_int_t),
		.compat_from_user = compat_standard_from_user,
		.compat_to_user   = compat_standard_to_user,
#endif
	},
	{
		.name             = XT_ERROR_TARGET,
		.target           = ipt_error,
		.targetsize       = XT_FUNCTION_MAXNAMELEN,
		.family           = NFPROTO_IPV4,
	},
};
/* {get,set}sockopt registration covering the IPT_SO_* option range,
 * with compat handlers for 32bit userspace. */
static struct nf_sockopt_ops ipt_sockopts = {
	.pf		= PF_INET,
	.set_optmin	= IPT_BASE_CTL,
	.set_optmax	= IPT_SO_SET_MAX+1,
	.set		= do_ipt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ipt_set_ctl,
#endif
	.get_optmin	= IPT_BASE_CTL,
	.get_optmax	= IPT_SO_GET_MAX+1,
	.get		= do_ipt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ipt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
/* Built-in match: the "icmp" protocol match. */
static struct xt_match ipt_builtin_mt[] __read_mostly = {
	{
		.name       = "icmp",
		.match      = icmp_match,
		.matchsize  = sizeof(struct ipt_icmp),
		.checkentry = icmp_checkentry,
		.proto      = IPPROTO_ICMP,
		.family     = NFPROTO_IPV4,
	},
};
/* Per-netns init: set up xtables state for the IPv4 family. */
static int __net_init ip_tables_net_init(struct net *net)
{
	return xt_proto_init(net, NFPROTO_IPV4);
}
/* Per-netns teardown counterpart of ip_tables_net_init(). */
static void __net_exit ip_tables_net_exit(struct net *net)
{
	xt_proto_fini(net, NFPROTO_IPV4);
}
/* Register per-network-namespace init/exit callbacks. */
static struct pernet_operations ip_tables_net_ops = {
	.init = ip_tables_net_init,
	.exit = ip_tables_net_exit,
};
/*
 * Module init: pernet subsys, built-in targets and matches, then the
 * sockopt interface; unwound in reverse order on failure.
 */
static int __init ip_tables_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_tables_net_ops);
	if (ret < 0)
		goto err1;

	/* No one else will be downing sem now, so we won't sleep */
	ret = xt_register_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	if (ret < 0)
		goto err2;
	ret = xt_register_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	if (ret < 0)
		goto err4;

	/* Register setsockopt */
	ret = nf_register_sockopt(&ipt_sockopts);
	if (ret < 0)
		goto err5;

	return 0;

err5:
	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
err4:
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
err2:
	unregister_pernet_subsys(&ip_tables_net_ops);
err1:
	return ret;
}
/* Module exit: undo ip_tables_init() registrations in reverse order. */
static void __exit ip_tables_fini(void)
{
	nf_unregister_sockopt(&ipt_sockopts);

	xt_unregister_matches(ipt_builtin_mt, ARRAY_SIZE(ipt_builtin_mt));
	xt_unregister_targets(ipt_builtin_tg, ARRAY_SIZE(ipt_builtin_tg));
	unregister_pernet_subsys(&ip_tables_net_ops);
}
/* Export the table (un)registration API and the packet traversal entry
 * point for use by other kernel modules. */
EXPORT_SYMBOL(ipt_register_table);
EXPORT_SYMBOL(ipt_unregister_table);
EXPORT_SYMBOL(ipt_do_table);

module_init(ip_tables_init);
module_exit(ip_tables_fini);
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_99_1 |
crossvul-cpp_data_bad_178_0 | /*
** class.c - Class class
**
** See Copyright Notice in mruby.h
*/
#include <stdarg.h>
#include <mruby.h>
#include <mruby/array.h>
#include <mruby/class.h>
#include <mruby/numeric.h>
#include <mruby/proc.h>
#include <mruby/string.h>
#include <mruby/variable.h>
#include <mruby/error.h>
#include <mruby/data.h>
#include <mruby/istruct.h>
KHASH_DEFINE(mt, mrb_sym, mrb_method_t, TRUE, kh_int_hash_func, kh_int_hash_equal)
/* GC mark phase: mark every proc stored in class c's method table. */
void
mrb_gc_mark_mt(mrb_state *mrb, struct RClass *c)
{
  khiter_t k;
  khash_t(mt) *h = c->mt;

  if (!h) return;               /* no method table allocated yet */
  for (k = kh_begin(h); k != kh_end(h); k++) {
    if (kh_exist(h, k)) {
      mrb_method_t m = kh_value(h, k);

      /* only proc-backed methods reference a GC object */
      if (MRB_METHOD_PROC_P(m)) {
        struct RProc *p = MRB_METHOD_PROC(m);
        mrb_gc_mark(mrb, (struct RBasic*)p);
      }
    }
  }
}
/* Number of entries in c's method table; 0 when none is allocated. */
size_t
mrb_gc_mark_mt_size(mrb_state *mrb, struct RClass *c)
{
  khash_t(mt) *table = c->mt;

  return table ? kh_size(table) : 0;
}
/* GC sweep helper: free class c's method table. */
void
mrb_gc_free_mt(mrb_state *mrb, struct RClass *c)
{
  kh_destroy(mt, mrb, c->mt);
}
/*
 * Record the (possibly namespace-qualified) name of class c in its
 * hidden __classname__ ivar.  If the outer class is itself unnamed,
 * store the outer class in __outer__ instead so the name can be
 * resolved later.  Does nothing if c is already named.
 */
void
mrb_class_name_class(mrb_state *mrb, struct RClass *outer, struct RClass *c, mrb_sym id)
{
  mrb_value name;
  mrb_sym nsym = mrb_intern_lit(mrb, "__classname__");

  if (mrb_obj_iv_defined(mrb, (struct RObject*)c, nsym)) return;
  if (outer == NULL || outer == mrb->object_class) {
    /* top-level class: the bare symbol is the full name */
    name = mrb_symbol_value(id);
  }
  else {
    name = mrb_class_path(mrb, outer);
    if (mrb_nil_p(name)) {      /* unnamed outer class */
      if (outer != mrb->object_class) {
        mrb_obj_iv_set(mrb, (struct RObject*)c, mrb_intern_lit(mrb, "__outer__"),
                       mrb_obj_value(outer));
      }
      return;
    }
    /* qualified name: "Outer::Inner" */
    mrb_str_cat_cstr(mrb, name, "::");
    mrb_str_cat_cstr(mrb, name, mrb_sym2name(mrb, id));
  }
  mrb_obj_iv_set(mrb, (struct RObject*)c, nsym, name);
}
/* Name class c relative to outer and bind it as the constant `id`
 * on outer. */
static void
setup_class(mrb_state *mrb, struct RClass *outer, struct RClass *c, mrb_sym id)
{
  mrb_class_name_class(mrb, outer, c, id);
  mrb_obj_iv_set(mrb, (struct RObject*)outer, id, mrb_obj_value(c));
}
#define make_metaclass(mrb, c) prepare_singleton_class((mrb), (struct RBasic*)(c))

/*
 * Ensure object o has a singleton class: if o->c is not already an
 * MRB_TT_SCLASS, allocate one, choose its superclass depending on
 * whether o is a class, a singleton class or a plain object, and wire
 * it in (with write barriers and a back-pointer in __attached__).
 */
static void
prepare_singleton_class(mrb_state *mrb, struct RBasic *o)
{
  struct RClass *sc, *c;

  if (o->c->tt == MRB_TT_SCLASS) return;    /* already has one */
  sc = (struct RClass*)mrb_obj_alloc(mrb, MRB_TT_SCLASS, mrb->class_class);
  sc->flags |= MRB_FLAG_IS_INHERITED;
  sc->mt = kh_init(mt, mrb);
  sc->iv = 0;
  if (o->tt == MRB_TT_CLASS) {
    c = (struct RClass*)o;
    if (!c->super) {
      /* singleton of a root class inherits from Class itself */
      sc->super = mrb->class_class;
    }
    else {
      /* otherwise from the superclass's singleton class */
      sc->super = c->super->c;
    }
  }
  else if (o->tt == MRB_TT_SCLASS) {
    c = (struct RClass*)o;
    /* skip module include wrappers to reach the real superclass */
    while (c->super->tt == MRB_TT_ICLASS)
      c = c->super;
    make_metaclass(mrb, c->super);
    sc->super = c->super->c;
  }
  else {
    /* plain object: singleton sits between o and its old class */
    sc->super = o->c;
    prepare_singleton_class(mrb, (struct RBasic*)sc);
  }
  o->c = sc;
  mrb_field_write_barrier(mrb, (struct RBasic*)o, (struct RBasic*)sc);
  mrb_field_write_barrier(mrb, (struct RBasic*)sc, (struct RBasic*)o);
  mrb_obj_iv_set(mrb, (struct RObject*)sc, mrb_intern_lit(mrb, "__attached__"), mrb_obj_value(o));
}
/* Fetches constant `id` from `klass` and requires it to be a Class. */
static struct RClass*
class_from_sym(mrb_state *mrb, struct RClass *klass, mrb_sym id)
{
  mrb_value val = mrb_const_get(mrb, mrb_obj_value(klass), id);

  mrb_check_type(mrb, val, MRB_TT_CLASS);
  return mrb_class_ptr(val);
}

/* Fetches constant `id` from `klass` and requires it to be a Module. */
static struct RClass*
module_from_sym(mrb_state *mrb, struct RClass *klass, mrb_sym id)
{
  mrb_value val = mrb_const_get(mrb, mrb_obj_value(klass), id);

  mrb_check_type(mrb, val, MRB_TT_MODULE);
  return mrb_class_ptr(val);
}

/* TRUE when obj is a class, singleton class, or module. */
static mrb_bool
class_ptr_p(mrb_value obj)
{
  enum mrb_vtype t = mrb_type(obj);

  if (t == MRB_TT_CLASS || t == MRB_TT_SCLASS || t == MRB_TT_MODULE) {
    return TRUE;
  }
  return FALSE;
}

/* Raises TypeError unless obj is a class/module. */
static void
check_if_class_or_module(mrb_state *mrb, mrb_value obj)
{
  if (class_ptr_p(obj)) return;
  mrb_raisef(mrb, E_TYPE_ERROR, "%S is not a class/module", mrb_inspect(mrb, obj));
}
/* Looks up module `name` under `outer`, creating, naming and binding a
 * fresh module when the constant does not exist yet.  An existing
 * constant must already be a module (module_from_sym type-checks). */
static struct RClass*
define_module(mrb_state *mrb, mrb_sym name, struct RClass *outer)
{
  struct RClass *m;

  if (mrb_const_defined_at(mrb, mrb_obj_value(outer), name)) {
    return module_from_sym(mrb, outer, name);
  }
  m = mrb_module_new(mrb);
  setup_class(mrb, outer, m, name);

  return m;
}

/* Defines (or reopens) a top-level module named by symbol. */
MRB_API struct RClass*
mrb_define_module_id(mrb_state *mrb, mrb_sym name)
{
  return define_module(mrb, name, mrb->object_class);
}

/* Defines (or reopens) a top-level module named by C string. */
MRB_API struct RClass*
mrb_define_module(mrb_state *mrb, const char *name)
{
  return define_module(mrb, mrb_intern_cstr(mrb, name), mrb->object_class);
}

/* VM backend of the `module Foo` keyword.  Reopens an existing module
 * constant (raising TypeError if the constant is not a module), or
 * creates a new module under `outer`. */
MRB_API struct RClass*
mrb_vm_define_module(mrb_state *mrb, mrb_value outer, mrb_sym id)
{
  check_if_class_or_module(mrb, outer);
  if (mrb_const_defined_at(mrb, outer, id)) {
    mrb_value old = mrb_const_get(mrb, outer, id);

    if (mrb_type(old) != MRB_TT_MODULE) {
      mrb_raisef(mrb, E_TYPE_ERROR, "%S is not a module", mrb_inspect(mrb, old));
    }
    return mrb_class_ptr(old);
  }
  return define_module(mrb, id, mrb_class_ptr(outer));
}

/* Defines (or reopens) module `name` nested under class/module `outer`. */
MRB_API struct RClass*
mrb_define_module_under(mrb_state *mrb, struct RClass *outer, const char *name)
{
  mrb_sym id = mrb_intern_cstr(mrb, name);
  struct RClass * c = define_module(mrb, id, outer);

  setup_class(mrb, outer, c, id);
  return c;
}
/* Skips the prepend-origin ICLASS (if any), yielding the class that
 * owns c's real method table.  NOTE: MRB_CLASS_ORIGIN is a macro that
 * reassigns its argument, hence the local copy. */
static struct RClass*
find_origin(struct RClass *c)
{
  MRB_CLASS_ORIGIN(c);
  return c;
}

/* Looks up class `name` under `outer`, creating it with superclass
 * `super` when absent.  Reopening an existing class with a different
 * (non-NULL) superclass raises TypeError. */
static struct RClass*
define_class(mrb_state *mrb, mrb_sym name, struct RClass *super, struct RClass *outer)
{
  struct RClass * c;

  if (mrb_const_defined_at(mrb, mrb_obj_value(outer), name)) {
    c = class_from_sym(mrb, outer, name);
    MRB_CLASS_ORIGIN(c);
    /* mrb_class_real skips ICLASS/SCLASS wrappers before comparing */
    if (super && mrb_class_real(c->super) != super) {
      mrb_raisef(mrb, E_TYPE_ERROR, "superclass mismatch for Class %S (%S not %S)",
                 mrb_sym2str(mrb, name),
                 mrb_obj_value(c->super), mrb_obj_value(super));
    }
    return c;
  }

  c = mrb_class_new(mrb, super);
  setup_class(mrb, outer, c, name);

  return c;
}

/* Defines (or reopens) top-level class `name` with superclass `super`;
 * a NULL super falls back to Object (with a warning). */
MRB_API struct RClass*
mrb_define_class_id(mrb_state *mrb, mrb_sym name, struct RClass *super)
{
  if (!super) {
    mrb_warn(mrb, "no super class for '%S', Object assumed", mrb_sym2str(mrb, name));
  }
  return define_class(mrb, name, super, mrb->object_class);
}

/* C-string convenience wrapper for mrb_define_class_id(). */
MRB_API struct RClass*
mrb_define_class(mrb_state *mrb, const char *name, struct RClass *super)
{
  return mrb_define_class_id(mrb, mrb_intern_cstr(mrb, name), super);
}
static mrb_value mrb_bob_init(mrb_state *mrb, mrb_value);
#ifdef MRB_METHOD_CACHE
static void mc_clear_all(mrb_state *mrb);
static void mc_clear_by_class(mrb_state *mrb, struct RClass*);
static void mc_clear_by_id(mrb_state *mrb, struct RClass*, mrb_sym);
#else
#define mc_clear_all(mrb)
#define mc_clear_by_class(mrb,c)
#define mc_clear_by_id(mrb,c,s)
#endif
/* Flags `super` as inherited (so stale method-cache entries get
 * flushed) and invokes the user-visible Class#inherited hook unless it
 * is still the default no-op (mrb_bob_init). */
static void
mrb_class_inherited(mrb_state *mrb, struct RClass *super, struct RClass *klass)
{
  mrb_value s;
  mrb_sym mid;

  if (!super)
    super = mrb->object_class;
  super->flags |= MRB_FLAG_IS_INHERITED;
  s = mrb_obj_value(super);
  mc_clear_by_class(mrb, klass);
  mid = mrb_intern_lit(mrb, "inherited");
  if (!mrb_func_basic_p(mrb, s, mid, mrb_bob_init)) {
    mrb_value c = mrb_obj_value(klass);
    mrb_funcall_argv(mrb, s, mid, 1, &c);
  }
}

/* VM backend of the `class Foo < Bar` keyword.  Validates the
 * superclass expression, reopens an existing class constant (checking
 * superclass consistency), or defines a new class and fires the
 * inherited hook. */
MRB_API struct RClass*
mrb_vm_define_class(mrb_state *mrb, mrb_value outer, mrb_value super, mrb_sym id)
{
  struct RClass *s;
  struct RClass *c;

  if (!mrb_nil_p(super)) {
    if (mrb_type(super) != MRB_TT_CLASS) {
      mrb_raisef(mrb, E_TYPE_ERROR, "superclass must be a Class (%S given)",
                 mrb_inspect(mrb, super));
    }
    s = mrb_class_ptr(super);
  }
  else {
    s = 0;  /* no explicit superclass; define_class falls back to Object */
  }
  check_if_class_or_module(mrb, outer);
  if (mrb_const_defined_at(mrb, outer, id)) {
    mrb_value old = mrb_const_get(mrb, outer, id);

    if (mrb_type(old) != MRB_TT_CLASS) {
      mrb_raisef(mrb, E_TYPE_ERROR, "%S is not a class", mrb_inspect(mrb, old));
    }
    c = mrb_class_ptr(old);
    if (s) {
      /* check super class */
      if (mrb_class_real(c->super) != s) {
        mrb_raisef(mrb, E_TYPE_ERROR, "superclass mismatch for class %S", old);
      }
    }
    return c;
  }
  c = define_class(mrb, id, s, mrb_class_ptr(outer));
  mrb_class_inherited(mrb, mrb_class_real(c->super), c);

  return c;
}
/* TRUE when a top-level constant `name` exists.  Uses the non-interning
 * symbol lookup first: a name that was never interned cannot refer to
 * an existing constant. */
MRB_API mrb_bool
mrb_class_defined(mrb_state *mrb, const char *name)
{
  mrb_value sym = mrb_check_intern_cstr(mrb, name);

  if (mrb_nil_p(sym)) return FALSE;
  return mrb_const_defined(mrb, mrb_obj_value(mrb->object_class), mrb_symbol(sym));
}

/* TRUE when a constant `name` exists directly under `outer`. */
MRB_API mrb_bool
mrb_class_defined_under(mrb_state *mrb, struct RClass *outer, const char *name)
{
  mrb_value sym = mrb_check_intern_cstr(mrb, name);

  if (mrb_nil_p(sym)) return FALSE;
  return mrb_const_defined_at(mrb, mrb_obj_value(outer), mrb_symbol(sym));
}

/* Returns the class named `name` under `outer`; raises unless the
 * constant exists and is a Class. */
MRB_API struct RClass*
mrb_class_get_under(mrb_state *mrb, struct RClass *outer, const char *name)
{
  mrb_sym id = mrb_intern_cstr(mrb, name);

  return class_from_sym(mrb, outer, id);
}

/* Returns the top-level class named `name`. */
MRB_API struct RClass*
mrb_class_get(mrb_state *mrb, const char *name)
{
  return mrb_class_get_under(mrb, mrb->object_class, name);
}

/* Returns the exception class named `name`, falling back to Exception
 * itself when the constant is missing the Exception ancestry (guards
 * against a corrupted or user-overwritten constant). */
MRB_API struct RClass*
mrb_exc_get(mrb_state *mrb, const char *name)
{
  struct RClass *found, *walk;
  mrb_value c = mrb_const_get(mrb, mrb_obj_value(mrb->object_class),
                              mrb_intern_cstr(mrb, name));

  if (mrb_type(c) != MRB_TT_CLASS) {
    mrb_raise(mrb, mrb->eException_class, "exception corrupted");
  }
  found = mrb_class_ptr(c);
  for (walk = found; walk; walk = walk->super) {
    if (walk == mrb->eException_class)
      return found;
  }
  return mrb->eException_class;
}

/* Returns the module named `name` under `outer`; raises unless the
 * constant exists and is a Module. */
MRB_API struct RClass*
mrb_module_get_under(mrb_state *mrb, struct RClass *outer, const char *name)
{
  mrb_sym id = mrb_intern_cstr(mrb, name);

  return module_from_sym(mrb, outer, id);
}

/* Returns the top-level module named `name`. */
MRB_API struct RClass*
mrb_module_get(mrb_state *mrb, const char *name)
{
  return mrb_module_get_under(mrb, mrb->object_class, name);
}
/*!
* Defines a class under the namespace of \a outer.
* \param outer a class which contains the new class.
* \param id name of the new class
* \param super a class from which the new class will derive.
* NULL means \c Object class.
* \return the created class
* \throw TypeError if the constant name \a name is already taken but
* the constant is not a \c Class.
* \throw NameError if the class is already defined but the class can not
* be reopened because its superclass is not \a super.
* \post top-level constant named \a name refers the returned class.
*
* \note if a class named \a name is already defined and its superclass is
* \a super, the function just returns the defined class.
*/
MRB_API struct RClass*
mrb_define_class_under(mrb_state *mrb, struct RClass *outer, const char *name, struct RClass *super)
{
  mrb_sym id = mrb_intern_cstr(mrb, name);
  struct RClass * c;

#if 0
  if (!super) {
    mrb_warn(mrb, "no super class for '%S::%S', Object assumed",
             mrb_obj_value(outer), mrb_sym2str(mrb, id));
  }
#endif
  c = define_class(mrb, id, super, outer);
  setup_class(mrb, outer, c, id);
  return c;
}

/* Installs method `mid` -> `m` directly into class c's method table
 * (the origin class when c has prepended modules).  Frozen classes and
 * modules reject the definition.  Proc-backed methods are marked as
 * scope-defining and rebound to c as their target class. */
MRB_API void
mrb_define_method_raw(mrb_state *mrb, struct RClass *c, mrb_sym mid, mrb_method_t m)
{
  khash_t(mt) *h;
  khiter_t k;

  MRB_CLASS_ORIGIN(c);
  h = c->mt;

  if (MRB_FROZEN_P(c)) {
    if (c->tt == MRB_TT_MODULE)
      mrb_raise(mrb, E_FROZEN_ERROR, "can't modify frozen module");
    else
      mrb_raise(mrb, E_FROZEN_ERROR, "can't modify frozen class");
  }
  if (!h) h = c->mt = kh_init(mt, mrb);  /* table is allocated lazily */
  k = kh_put(mt, mrb, h, mid);
  kh_value(h, k) = m;
  if (MRB_METHOD_PROC_P(m) && !MRB_METHOD_UNDEF_P(m)) {
    struct RProc *p = MRB_METHOD_PROC(m);

    p->flags |= MRB_PROC_SCOPE;
    p->c = NULL;
    mrb_field_write_barrier(mrb, (struct RBasic*)c, (struct RBasic*)p);
    if (!MRB_PROC_ENV_P(p)) {
      MRB_PROC_SET_TARGET_CLASS(p, c);
    }
  }
  /* redefinition may invalidate cached lookups for this selector */
  mc_clear_by_id(mrb, c, mid);
}

/* Defines a C-function method.  `aspec` (the argument spec) is
 * currently unused here; kept for API compatibility.  The arena
 * save/restore keeps temporary allocations from pinning GC objects. */
MRB_API void
mrb_define_method_id(mrb_state *mrb, struct RClass *c, mrb_sym mid, mrb_func_t func, mrb_aspec aspec)
{
  mrb_method_t m;
  int ai = mrb_gc_arena_save(mrb);

  MRB_METHOD_FROM_FUNC(m, func);
  mrb_define_method_raw(mrb, c, mid, m);
  mrb_gc_arena_restore(mrb, ai);
}

/* C-string convenience wrapper for mrb_define_method_id(). */
MRB_API void
mrb_define_method(mrb_state *mrb, struct RClass *c, const char *name, mrb_func_t func, mrb_aspec aspec)
{
  mrb_define_method_id(mrb, c, mrb_intern_cstr(mrb, name), func, aspec);
}
/* a function to raise NotImplementedError with current method name */
MRB_API void
mrb_notimplement(mrb_state *mrb)
{
  const char *str;
  mrb_int len;
  mrb_callinfo *ci = mrb->c->ci;

  if (ci->mid) {
    /* symbol names live as long as the interpreter, so a static
     * (non-copying) string is safe here */
    str = mrb_sym2name_len(mrb, ci->mid, &len);
    mrb_raisef(mrb, E_NOTIMP_ERROR,
               "%S() function is unimplemented on this machine",
               mrb_str_new_static(mrb, str, (size_t)len));
  }
}

/* a function to be replacement of unimplemented method */
MRB_API mrb_value
mrb_notimplement_m(mrb_state *mrb, mrb_value self)
{
  mrb_notimplement(mrb);
  /* not reached */
  return mrb_nil_value();
}

/* Converts val to type t via the named conversion method `m`,
 * raising TypeError when the conversion yields nil. */
static mrb_value
check_type(mrb_state *mrb, mrb_value val, enum mrb_vtype t, const char *c, const char *m)
{
  mrb_value tmp;

  tmp = mrb_check_convert_type(mrb, val, t, c, m);
  if (mrb_nil_p(tmp)) {
    mrb_raisef(mrb, E_TYPE_ERROR, "expected %S", mrb_str_new_cstr(mrb, c));
  }
  return tmp;
}

/* Coerce to String via #to_str (raises TypeError on failure). */
static mrb_value
to_str(mrb_state *mrb, mrb_value val)
{
  return check_type(mrb, val, MRB_TT_STRING, "String", "to_str");
}

/* Coerce to Array via #to_ary (raises TypeError on failure). */
static mrb_value
to_ary(mrb_state *mrb, mrb_value val)
{
  return check_type(mrb, val, MRB_TT_ARRAY, "Array", "to_ary");
}

/* Coerce to Hash via #to_hash (raises TypeError on failure). */
static mrb_value
to_hash(mrb_state *mrb, mrb_value val)
{
  return check_type(mrb, val, MRB_TT_HASH, "Hash", "to_hash");
}

/* Converts a Symbol or String value to an interned symbol; anything
 * else raises TypeError. */
static mrb_sym
to_sym(mrb_state *mrb, mrb_value ss)
{
  if (mrb_type(ss) == MRB_TT_SYMBOL) {
    return mrb_symbol(ss);
  }
  else if (mrb_string_p(ss)) {
    return mrb_intern_str(mrb, to_str(mrb, ss));
  }
  else {
    mrb_value obj = mrb_funcall(mrb, ss, "inspect", 0);
    mrb_raisef(mrb, E_TYPE_ERROR, "%S is not a symbol", obj);
    /* not reached */
    return 0;
  }
}
/* Number of arguments passed to the current method.  A negative
 * ci->argc marks the packed calling convention where stack[1] holds
 * all arguments in a single array. */
MRB_API mrb_int
mrb_get_argc(mrb_state *mrb)
{
  mrb_int argc = mrb->c->ci->argc;

  if (argc < 0) {
    struct RArray *a = mrb_ary_ptr(mrb->c->stack[1]);

    argc = ARY_LEN(a);
  }
  return argc;
}

/* Pointer to the packed argument array, or NULL when arguments are
 * spread on the VM stack (see mrb_get_argc above). */
MRB_API mrb_value*
mrb_get_argv(mrb_state *mrb)
{
  mrb_int argc = mrb->c->ci->argc;
  mrb_value *array_argv;

  if (argc < 0) {
    struct RArray *a = mrb_ary_ptr(mrb->c->stack[1]);

    array_argv = ARY_PTR(a);
  }
  else {
    array_argv = NULL;
  }
  return array_argv;
}
/*
retrieve arguments from mrb_state.
mrb_get_args(mrb, format, ...)
returns number of arguments parsed.
format specifiers:
string mruby type C type note
----------------------------------------------------------------------------------------------
o: Object [mrb_value]
C: class/module [mrb_value]
S: String [mrb_value] when ! follows, the value may be nil
A: Array [mrb_value] when ! follows, the value may be nil
H: Hash [mrb_value] when ! follows, the value may be nil
s: String [char*,mrb_int] Receive two arguments; s! gives (NULL,0) for nil
z: String [char*] NUL terminated string; z! gives NULL for nil
a: Array [mrb_value*,mrb_int] Receive two arguments; a! gives (NULL,0) for nil
f: Float [mrb_float]
i: Integer [mrb_int]
b: Boolean [mrb_bool]
n: Symbol [mrb_sym]
d: Data [void*,mrb_data_type const] 2nd argument will be used to check data type so it won't be modified
I: Inline struct [void*]
&: Block [mrb_value] &! raises exception if no block given
*: rest argument [mrb_value*,mrb_int] The rest of the arguments as an array; *! avoid copy of the stack
|: optional Following arguments are optional
?: optional given [mrb_bool] true if preceding argument (optional) is given
*/
/* Parses the current method call's arguments according to `format`
 * (see the specifier table in the comment above) and stores them
 * through the pointers passed as varargs.  Returns the number of
 * arguments consumed.  Raises ArgumentError on arity mismatch and
 * TypeError when a conversion fails. */
MRB_API mrb_int
mrb_get_args(mrb_state *mrb, const char *format, ...)
{
  const char *fmt = format;
  char c;
  mrb_int i = 0;
  va_list ap;
  mrb_int argc = mrb_get_argc(mrb);
  mrb_int arg_i = 0;
  mrb_value *array_argv = mrb_get_argv(mrb);
  mrb_bool opt = FALSE;
  mrb_bool opt_skip = TRUE;  /* TRUE when nothing follows '|' that still consumes varargs */
  mrb_bool given = TRUE;

  va_start(ap, format);

  /* arguments live either in the packed array or on the VM stack */
#define ARGV \
  (array_argv ? array_argv : (mrb->c->stack + 1))

  /* first pass over the format: decide whether hitting '|' with all
   * arguments consumed may return early (opt_skip) */
  while ((c = *fmt++)) {
    switch (c) {
    case '|':
      opt = TRUE;
      break;
    case '*':
      opt_skip = FALSE;
      goto check_exit;
    case '!':
      break;
    case '&': case '?':
      if (opt) opt_skip = FALSE;
      break;
    default:
      break;
    }
  }

 check_exit:
  opt = FALSE;
  i = 0;
  /* second pass: actually extract and convert the arguments */
  while ((c = *format++)) {
    switch (c) {
    case '|': case '*': case '&': case '?':
      break;  /* these specifiers do not consume a positional argument */
    default:
      if (argc <= i) {
        if (opt) {
          given = FALSE;  /* optional argument omitted; reported via '?' */
        }
        else {
          mrb_raise(mrb, E_ARGUMENT_ERROR, "wrong number of arguments");
        }
      }
      break;
    }

    switch (c) {
    case 'o':  /* any object, passed through as-is */
      {
        mrb_value *p;

        p = va_arg(ap, mrb_value*);
        if (i < argc) {
          *p = ARGV[arg_i++];
          i++;
        }
      }
      break;
    case 'C':  /* class or module */
      {
        mrb_value *p;

        p = va_arg(ap, mrb_value*);
        if (i < argc) {
          mrb_value ss;

          ss = ARGV[arg_i++];
          if (!class_ptr_p(ss)) {
            mrb_raisef(mrb, E_TYPE_ERROR, "%S is not class/module", ss);
          }
          *p = ss;
          i++;
        }
      }
      break;
    case 'S':  /* String object; 'S!' passes nil through unconverted */
      {
        mrb_value *p;

        p = va_arg(ap, mrb_value*);
        if (*format == '!') {
          format++;
          if (i < argc && mrb_nil_p(ARGV[arg_i])) {
            *p = ARGV[arg_i++];
            i++;
            break;
          }
        }
        if (i < argc) {
          *p = to_str(mrb, ARGV[arg_i++]);
          i++;
        }
      }
      break;
    case 'A':  /* Array object; 'A!' passes nil through unconverted */
      {
        mrb_value *p;

        p = va_arg(ap, mrb_value*);
        if (*format == '!') {
          format++;
          if (i < argc && mrb_nil_p(ARGV[arg_i])) {
            *p = ARGV[arg_i++];
            i++;
            break;
          }
        }
        if (i < argc) {
          *p = to_ary(mrb, ARGV[arg_i++]);
          i++;
        }
      }
      break;
    case 'H':  /* Hash object; 'H!' passes nil through unconverted */
      {
        mrb_value *p;

        p = va_arg(ap, mrb_value*);
        if (*format == '!') {
          format++;
          if (i < argc && mrb_nil_p(ARGV[arg_i])) {
            *p = ARGV[arg_i++];
            i++;
            break;
          }
        }
        if (i < argc) {
          *p = to_hash(mrb, ARGV[arg_i++]);
          i++;
        }
      }
      break;
    case 's':  /* C string pointer + length; 's!' yields (NULL,0) for nil */
      {
        mrb_value ss;
        char **ps = 0;
        mrb_int *pl = 0;

        ps = va_arg(ap, char**);
        pl = va_arg(ap, mrb_int*);
        if (*format == '!') {
          format++;
          if (i < argc && mrb_nil_p(ARGV[arg_i])) {
            *ps = NULL;
            *pl = 0;
            i++; arg_i++;
            break;
          }
        }
        if (i < argc) {
          ss = to_str(mrb, ARGV[arg_i++]);
          *ps = RSTRING_PTR(ss);
          *pl = RSTRING_LEN(ss);
          i++;
        }
      }
      break;
    case 'z':  /* NUL-terminated C string; 'z!' yields NULL for nil */
      {
        mrb_value ss;
        const char **ps;

        ps = va_arg(ap, const char**);
        if (*format == '!') {
          format++;
          if (i < argc && mrb_nil_p(ARGV[arg_i])) {
            *ps = NULL;
            i++; arg_i++;
            break;
          }
        }
        if (i < argc) {
          ss = to_str(mrb, ARGV[arg_i++]);
          /* raises if the string contains embedded NULs */
          *ps = mrb_string_value_cstr(mrb, &ss);
          i++;
        }
      }
      break;
    case 'a':  /* array body pointer + length; 'a!' yields (NULL,0) for nil */
      {
        mrb_value aa;
        struct RArray *a;
        mrb_value **pb;
        mrb_int *pl;

        pb = va_arg(ap, mrb_value**);
        pl = va_arg(ap, mrb_int*);
        if (*format == '!') {
          format++;
          if (i < argc && mrb_nil_p(ARGV[arg_i])) {
            *pb = 0;
            *pl = 0;
            i++; arg_i++;
            break;
          }
        }
        if (i < argc) {
          aa = to_ary(mrb, ARGV[arg_i++]);
          a = mrb_ary_ptr(aa);
          *pb = ARY_PTR(a);
          *pl = ARY_LEN(a);
          i++;
        }
      }
      break;
    case 'I':  /* inline struct payload pointer */
      {
        void* *p;
        mrb_value ss;

        p = va_arg(ap, void**);
        if (i < argc) {
          ss = ARGV[arg_i];
          if (mrb_type(ss) != MRB_TT_ISTRUCT)
          {
            mrb_raisef(mrb, E_TYPE_ERROR, "%S is not inline struct", ss);
          }
          *p = mrb_istruct_ptr(ss);
          arg_i++;
          i++;
        }
      }
      break;
#ifndef MRB_WITHOUT_FLOAT
    case 'f':  /* float (numeric coercion via mrb_to_flo) */
      {
        mrb_float *p;

        p = va_arg(ap, mrb_float*);
        if (i < argc) {
          *p = mrb_to_flo(mrb, ARGV[arg_i]);
          arg_i++;
          i++;
        }
      }
      break;
#endif
    case 'i':  /* integer; floats must be exactly representable */
      {
        mrb_int *p;

        p = va_arg(ap, mrb_int*);
        if (i < argc) {
          switch (mrb_type(ARGV[arg_i])) {
          case MRB_TT_FIXNUM:
            *p = mrb_fixnum(ARGV[arg_i]);
            break;
#ifndef MRB_WITHOUT_FLOAT
          case MRB_TT_FLOAT:
            {
              mrb_float f = mrb_float(ARGV[arg_i]);

              if (!FIXABLE_FLOAT(f)) {
                mrb_raise(mrb, E_RANGE_ERROR, "float too big for int");
              }
              *p = (mrb_int)f;
            }
            break;
#endif
          case MRB_TT_STRING:
            mrb_raise(mrb, E_TYPE_ERROR, "no implicit conversion of String into Integer");
            break;
          default:
            *p = mrb_fixnum(mrb_Integer(mrb, ARGV[arg_i]));
            break;
          }
          arg_i++;
          i++;
        }
      }
      break;
    case 'b':  /* truthiness of the argument */
      {
        mrb_bool *boolp = va_arg(ap, mrb_bool*);

        if (i < argc) {
          mrb_value b = ARGV[arg_i++];
          *boolp = mrb_test(b);
          i++;
        }
      }
      break;
    case 'n':  /* symbol (Symbol or String accepted) */
      {
        mrb_sym *symp;

        symp = va_arg(ap, mrb_sym*);
        if (i < argc) {
          mrb_value ss;

          ss = ARGV[arg_i++];
          *symp = to_sym(mrb, ss);
          i++;
        }
      }
      break;
    case 'd':  /* wrapped data pointer, type-checked; 'd!' yields NULL for nil */
      {
        void** datap;
        struct mrb_data_type const* type;

        datap = va_arg(ap, void**);
        type = va_arg(ap, struct mrb_data_type const*);
        if (*format == '!') {
          format++;
          if (i < argc && mrb_nil_p(ARGV[arg_i])) {
            *datap = 0;
            i++; arg_i++;
            break;
          }
        }
        if (i < argc) {
          *datap = mrb_data_get_ptr(mrb, ARGV[arg_i++], type);
          ++i;
        }
      }
      break;

    case '&':  /* block argument; '&!' requires a block */
      {
        mrb_value *p, *bp;

        p = va_arg(ap, mrb_value*);
        if (mrb->c->ci->argc < 0) {
          /* packed convention: stack[1]=args array, stack[2]=block */
          bp = mrb->c->stack + 2;
        }
        else {
          bp = mrb->c->stack + mrb->c->ci->argc + 1;
        }
        if (*format == '!') {
          format++;
          if (mrb_nil_p(*bp)) {
            mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given");
          }
        }
        *p = *bp;
      }
      break;
    case '|':
      /* everything past this point is optional; bail out early when
       * all arguments were consumed and nothing later needs varargs */
      if (opt_skip && i == argc) return argc;
      opt = TRUE;
      break;
    case '?':  /* report whether the preceding optional argument was given */
      {
        mrb_bool *p;

        p = va_arg(ap, mrb_bool*);
        *p = given;
      }
      break;

    case '*':  /* rest arguments; '*!' (or packed args) avoids copying */
      {
        mrb_value **var;
        mrb_int *pl;
        mrb_bool nocopy = array_argv ? TRUE : FALSE;

        if (*format == '!') {
          format++;
          nocopy = TRUE;
        }
        var = va_arg(ap, mrb_value**);
        pl = va_arg(ap, mrb_int*);
        if (argc > i) {
          *pl = argc-i;
          if (*pl > 0) {
            if (nocopy) {
              *var = ARGV+arg_i;
            }
            else {
              /* copy so the caller's view survives stack reuse;
               * clearing ->c hides the temporary array from Ruby */
              mrb_value args = mrb_ary_new_from_values(mrb, *pl, ARGV+arg_i);
              RARRAY(args)->c = NULL;
              *var = RARRAY_PTR(args);
            }
          }
          i = argc;
          arg_i += *pl;
        }
        else {
          *pl = 0;
          *var = NULL;
        }
      }
      break;
    default:
      mrb_raisef(mrb, E_ARGUMENT_ERROR, "invalid argument specifier %S", mrb_str_new(mrb, &c, 1));
      break;
    }
  }

#undef ARGV
  /* the loop above only exits with c==0 (or returned early at '|'),
   * so this rejects surplus arguments the format never consumed */
  if (!c && argc > i) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "wrong number of arguments");
  }
  va_end(ap);
  return i;
}
/* Allocates a bare class while bootstrapping the core hierarchy;
 * bypasses the naming, constant binding and hooks of the public
 * mrb_define_class path.  A NULL super falls back to Object. */
static struct RClass*
boot_defclass(mrb_state *mrb, struct RClass *super)
{
  struct RClass *c;

  c = (struct RClass*)mrb_obj_alloc(mrb, MRB_TT_CLASS, mrb->class_class);
  if (super) {
    c->super = super;
    mrb_field_write_barrier(mrb, (struct RBasic*)c, (struct RBasic*)super);
  }
  else {
    c->super = mrb->object_class;
  }
  c->mt = kh_init(mt, mrb);
  return c;
}

/* Ensures a module created by the VM has a method table before use. */
static void
boot_initmod(mrb_state *mrb, struct RClass *mod)
{
  if (!mod->mt) {
    mod->mt = kh_init(mt, mrb);
  }
}

/* Creates the ICLASS proxy that splices module m into a superclass
 * chain above `super`.  The proxy shares m's ivar table and method
 * table, and its ->c points back at the real module. */
static struct RClass*
include_class_new(mrb_state *mrb, struct RClass *m, struct RClass *super)
{
  struct RClass *ic = (struct RClass*)mrb_obj_alloc(mrb, MRB_TT_ICLASS, mrb->class_class);

  if (m->tt == MRB_TT_ICLASS) {
    m = m->c;  /* unwrap an already-proxied module */
  }
  MRB_CLASS_ORIGIN(m);
  ic->iv = m->iv;
  ic->mt = m->mt;
  ic->super = super;
  if (m->tt == MRB_TT_ICLASS) {
    ic->c = m->c;
  }
  else {
    ic->c = m;
  }
  return ic;
}
/* Splices module m (and its own included modules) into c's superclass
 * chain after `ins_pos`.  Modules already present are skipped, moving
 * the insertion point past them.  Returns -1 when m's method table is
 * c's own table, i.e. a cyclic include; 0 otherwise.  `search_super`
 * controls whether superclasses beyond c count as "already included"
 * (include semantics) or not (prepend semantics). */
static int
include_module_at(mrb_state *mrb, struct RClass *c, struct RClass *ins_pos, struct RClass *m, int search_super)
{
  struct RClass *p, *ic;
  void *klass_mt = find_origin(c)->mt;

  while (m) {
    int superclass_seen = 0;

    if (m->flags & MRB_FLAG_IS_PREPENDED)
      goto skip;  /* the prepend-origin dummy is not a real module */
    if (klass_mt && klass_mt == m->mt)
      return -1;  /* including c into itself: cycle */

    p = c->super;
    while (p) {
      if (p->tt == MRB_TT_ICLASS) {
        if (p->mt == m->mt) {
          /* module already in the chain; insert later modules after it */
          if (!superclass_seen) {
            ins_pos = p; /* move insert point */
          }
          goto skip;
        }
      } else if (p->tt == MRB_TT_CLASS) {
        if (!search_super) break;
        superclass_seen = 1;
      }
      p = p->super;
    }

    ic = include_class_new(mrb, m, ins_pos->super);
    m->flags |= MRB_FLAG_IS_INHERITED;
    ins_pos->super = ic;
    mrb_field_write_barrier(mrb, (struct RBasic*)ins_pos, (struct RBasic*)ic);
    mc_clear_by_class(mrb, ins_pos);
    ins_pos = ic;
  skip:
    m = m->super;
  }
  /* ancestry changed: flush every cached method lookup */
  mc_clear_all(mrb);
  return 0;
}

/* Module#include backend; raises on a cyclic include. */
MRB_API void
mrb_include_module(mrb_state *mrb, struct RClass *c, struct RClass *m)
{
  int changed = include_module_at(mrb, c, find_origin(c), m, 1);
  if (changed < 0) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "cyclic include detected");
  }
}

/* Module#prepend backend.  On first prepend, c's own method table is
 * moved into a hidden "origin" ICLASS inserted directly above c, so
 * prepended modules can sit between c and its own methods. */
MRB_API void
mrb_prepend_module(mrb_state *mrb, struct RClass *c, struct RClass *m)
{
  struct RClass *origin;
  int changed = 0;

  if (!(c->flags & MRB_FLAG_IS_PREPENDED)) {
    origin = (struct RClass*)mrb_obj_alloc(mrb, MRB_TT_ICLASS, c);
    origin->flags |= MRB_FLAG_IS_ORIGIN | MRB_FLAG_IS_INHERITED;
    origin->super = c->super;
    c->super = origin;
    origin->mt = c->mt;
    c->mt = kh_init(mt, mrb);
    mrb_field_write_barrier(mrb, (struct RBasic*)c, (struct RBasic*)origin);
    c->flags |= MRB_FLAG_IS_PREPENDED;
  }
  changed = include_module_at(mrb, c, c, m, 0);
  if (changed < 0) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "cyclic prepend detected");
  }
}
/* Module#prepend_features(klass): prepends the receiver module into
 * klass's ancestry.  Receiver must be a module; klass is type-checked
 * by the "C" specifier. */
static mrb_value
mrb_mod_prepend_features(mrb_state *mrb, mrb_value mod)
{
  mrb_value klass;

  mrb_check_type(mrb, mod, MRB_TT_MODULE);
  mrb_get_args(mrb, "C", &klass);
  mrb_prepend_module(mrb, mrb_class_ptr(klass), mrb_class_ptr(mod));
  return mod;
}

/* Module#append_features(klass): includes the receiver module into
 * klass's ancestry. */
static mrb_value
mrb_mod_append_features(mrb_state *mrb, mrb_value mod)
{
  mrb_value klass;

  mrb_check_type(mrb, mod, MRB_TT_MODULE);
  mrb_get_args(mrb, "C", &klass);
  mrb_include_module(mrb, mrb_class_ptr(klass), mrb_class_ptr(mod));
  return mod;
}
/* 15.2.2.4.28 */
/*
* call-seq:
* mod.include?(module) -> true or false
*
* Returns <code>true</code> if <i>module</i> is included in
* <i>mod</i> or one of <i>mod</i>'s ancestors.
*
* module A
* end
* class B
* include A
* end
* class C < B
* end
* B.include?(A) #=> true
* C.include?(A) #=> true
* A.include?(A) #=> false
*/
/* Module#include?: walks the ancestor chain looking for an ICLASS
 * proxy whose target module is the argument. */
static mrb_value
mrb_mod_include_p(mrb_state *mrb, mrb_value mod)
{
  mrb_value mod2;
  struct RClass *c = mrb_class_ptr(mod);

  mrb_get_args(mrb, "C", &mod2);
  mrb_check_type(mrb, mod2, MRB_TT_MODULE);

  while (c) {
    if (c->tt == MRB_TT_ICLASS) {
      if (c->c == mrb_class_ptr(mod2)) return mrb_true_value();
    }
    c = c->super;
  }
  return mrb_false_value();
}

/* Module#ancestors: lists the receiver and every included/prepended
 * module, skipping the hidden prepend-origin entry. */
static mrb_value
mrb_mod_ancestors(mrb_state *mrb, mrb_value self)
{
  mrb_value result;
  struct RClass *c = mrb_class_ptr(self);
  result = mrb_ary_new(mrb);
  while (c) {
    if (c->tt == MRB_TT_ICLASS) {
      mrb_ary_push(mrb, result, mrb_obj_value(c->c));
    }
    else if (!(c->flags & MRB_FLAG_IS_PREPENDED)) {
      mrb_ary_push(mrb, result, mrb_obj_value(c));
    }
    c = c->super;
  }

  return result;
}

/* Module#extend_object(obj): mixes the module into obj's singleton
 * class, making its instance methods obj's singleton methods. */
static mrb_value
mrb_mod_extend_object(mrb_state *mrb, mrb_value mod)
{
  mrb_value obj;

  mrb_check_type(mrb, mod, MRB_TT_MODULE);
  mrb_get_args(mrb, "o", &obj);
  mrb_include_module(mrb, mrb_class_ptr(mrb_singleton_class(mrb, obj)), mrb_class_ptr(mod));
  return mod;
}

/* Module#included_modules: collects modules from the ancestor chain,
 * excluding the receiver's own origin entry. */
static mrb_value
mrb_mod_included_modules(mrb_state *mrb, mrb_value self)
{
  mrb_value result;
  struct RClass *c = mrb_class_ptr(self);
  struct RClass *origin = c;

  MRB_CLASS_ORIGIN(origin);
  result = mrb_ary_new(mrb);
  while (c) {
    if (c != origin && c->tt == MRB_TT_ICLASS) {
      if (c->c->tt == MRB_TT_MODULE) {
        mrb_ary_push(mrb, result, mrb_obj_value(c->c));
      }
    }
    c = c->super;
  }

  return result;
}

/* Module#initialize: optionally runs the given block with the new
 * module as self (module_eval semantics). */
static mrb_value
mrb_mod_initialize(mrb_state *mrb, mrb_value mod)
{
  mrb_value b;
  struct RClass *m = mrb_class_ptr(mod);
  boot_initmod(mrb, m); /* bootstrap a newly initialized module */
  mrb_get_args(mrb, "|&", &b);
  if (!mrb_nil_p(b)) {
    mrb_yield_with_class(mrb, b, 1, &mod, mod, m);
  }
  return mod;
}
mrb_value mrb_class_instance_method_list(mrb_state*, mrb_bool, struct RClass*, int);
/* 15.2.2.4.33 */
/*
* call-seq:
* mod.instance_methods(include_super=true) -> array
*
* Returns an array containing the names of the public and protected instance
* methods in the receiver. For a module, these are the public and protected methods;
* for a class, they are the instance (not singleton) methods. With no
* argument, or with an argument that is <code>false</code>, the
* instance methods in <i>mod</i> are returned, otherwise the methods
* in <i>mod</i> and <i>mod</i>'s superclasses are returned.
*
* module A
* def method1() end
* end
* class B
* def method2() end
* end
* class C < B
* def method3() end
* end
*
* A.instance_methods #=> [:method1]
* B.instance_methods(false) #=> [:method2]
* C.instance_methods(false) #=> [:method3]
* C.instance_methods(true).length #=> 43
*/
/* Module#instance_methods(include_super=true); see RDoc above. */
static mrb_value
mrb_mod_instance_methods(mrb_state *mrb, mrb_value mod)
{
  struct RClass *c = mrb_class_ptr(mod);
  mrb_bool recur = TRUE;
  mrb_get_args(mrb, "|b", &recur);
  return mrb_class_instance_method_list(mrb, recur, c, 0);
}

/* implementation of module_eval/class_eval */
mrb_value mrb_mod_module_eval(mrb_state*, mrb_value);

/* Placeholder for visibility modifiers (public/private/protected),
 * which mruby does not implement; simply returns the receiver. */
static mrb_value
mrb_mod_dummy_visibility(mrb_state *mrb, mrb_value mod)
{
  return mod;
}
/* Returns the singleton class of v, creating it on demand.  Immediate
 * values cannot carry one: nil/true/false map to their dedicated
 * classes, C pointers to Object, and numbers/symbols raise TypeError. */
MRB_API mrb_value
mrb_singleton_class(mrb_state *mrb, mrb_value v)
{
  struct RBasic *obj;

  switch (mrb_type(v)) {
  case MRB_TT_FALSE:
    if (mrb_nil_p(v))
      return mrb_obj_value(mrb->nil_class);
    return mrb_obj_value(mrb->false_class);
  case MRB_TT_TRUE:
    return mrb_obj_value(mrb->true_class);
  case MRB_TT_CPTR:
    return mrb_obj_value(mrb->object_class);
  case MRB_TT_SYMBOL:
  case MRB_TT_FIXNUM:
#ifndef MRB_WITHOUT_FLOAT
  case MRB_TT_FLOAT:
#endif
    mrb_raise(mrb, E_TYPE_ERROR, "can't define singleton");
    return mrb_nil_value();    /* not reached */
  default:
    break;
  }
  obj = mrb_basic_ptr(v);
  prepare_singleton_class(mrb, obj);
  return mrb_obj_value(obj->c);
}

/* Defines a method on o's singleton class only (a singleton method). */
MRB_API void
mrb_define_singleton_method(mrb_state *mrb, struct RObject *o, const char *name, mrb_func_t func, mrb_aspec aspec)
{
  prepare_singleton_class(mrb, (struct RBasic*)o);
  mrb_define_method_id(mrb, o->c, mrb_intern_cstr(mrb, name), func, aspec);
}

/* Defines a class-level method (singleton method of the class object). */
MRB_API void
mrb_define_class_method(mrb_state *mrb, struct RClass *c, const char *name, mrb_func_t func, mrb_aspec aspec)
{
  mrb_define_singleton_method(mrb, (struct RObject*)c, name, func, aspec);
}

/* Module-function style: defines the method both on the module itself
 * and as an instance method for includers. */
MRB_API void
mrb_define_module_function(mrb_state *mrb, struct RClass *c, const char *name, mrb_func_t func, mrb_aspec aspec)
{
  mrb_define_class_method(mrb, c, name, func, aspec);
  mrb_define_method(mrb, c, name, func, aspec);
}
#ifdef MRB_METHOD_CACHE
/* Invalidates every entry of the global method cache. */
static void
mc_clear_all(mrb_state *mrb)
{
  struct mrb_cache_entry *mc = mrb->cache;
  int i;

  for (i=0; i<MRB_METHOD_CACHE_SIZE; i++) {
    mc[i].c = 0;  /* a NULL class marks an empty slot */
  }
}

/* Invalidates cache entries keyed on class c.  If c ever served as a
 * superclass, its methods may be cached under subclasses too, so the
 * whole cache is flushed instead. */
static void
mc_clear_by_class(mrb_state *mrb, struct RClass *c)
{
  struct mrb_cache_entry *mc = mrb->cache;
  int i;

  if (c->flags & MRB_FLAG_IS_INHERITED) {
    mc_clear_all(mrb);
    c->flags &= ~MRB_FLAG_IS_INHERITED;
    return;
  }
  for (i=0; i<MRB_METHOD_CACHE_SIZE; i++) {
    if (mc[i].c == c) mc[i].c = 0;
  }
}

/* Invalidates cache entries for class c or selector mid (either match
 * clears the slot); same inherited-class caveat as above. */
static void
mc_clear_by_id(mrb_state *mrb, struct RClass *c, mrb_sym mid)
{
  struct mrb_cache_entry *mc = mrb->cache;
  int i;

  if (c->flags & MRB_FLAG_IS_INHERITED) {
    mc_clear_all(mrb);
    c->flags &= ~MRB_FLAG_IS_INHERITED;
    return;
  }
  for (i=0; i<MRB_METHOD_CACHE_SIZE; i++) {
    if (mc[i].c == c || mc[i].mid == mid)
      mc[i].c = 0;
  }
}
#endif
/* Core method lookup used by the VM.  Walks the superclass chain of
 * *cp for selector `mid`; on success *cp is updated to the defining
 * class and the method is returned (and cached when MRB_METHOD_CACHE
 * is enabled).  A method with a NULL proc (MRB_METHOD_UNDEF_P) is
 * returned when the method is missing or explicitly undefined. */
MRB_API mrb_method_t
mrb_method_search_vm(mrb_state *mrb, struct RClass **cp, mrb_sym mid)
{
  khiter_t k;
  mrb_method_t m;
  struct RClass *c = *cp;
#ifdef MRB_METHOD_CACHE
  struct RClass *oc = c;
  int h = kh_int_hash_func(mrb, ((intptr_t)oc) ^ mid) & (MRB_METHOD_CACHE_SIZE-1);
  struct mrb_cache_entry *mc = &mrb->cache[h];

  if (mc->c == c && mc->mid == mid) {
    *cp = mc->c0;
    return mc->m;
  }
#endif

  while (c) {
    /* renamed from 'h': the old name shadowed the cache index above
     * when MRB_METHOD_CACHE was enabled (-Wshadow) */
    khash_t(mt) *tbl = c->mt;

    if (tbl) {
      k = kh_get(mt, mrb, tbl, mid);
      if (k != kh_end(tbl)) {
        m = kh_value(tbl, k);
        if (MRB_METHOD_UNDEF_P(m)) break;  /* explicitly undef'ed */
        *cp = c;
#ifdef MRB_METHOD_CACHE
        mc->c = oc;
        mc->c0 = c;
        mc->mid = mid;
        mc->m = m;
#endif
        return m;
      }
    }
    c = c->super;
  }
  MRB_METHOD_FROM_PROC(m, NULL);
  return m;                  /* no method */
}

/* Like mrb_method_search_vm() but raises NameError when the method is
 * undefined; long inspect output is truncated to a plain to_s. */
MRB_API mrb_method_t
mrb_method_search(mrb_state *mrb, struct RClass* c, mrb_sym mid)
{
  mrb_method_t m;

  m = mrb_method_search_vm(mrb, &c, mid);
  if (MRB_METHOD_UNDEF_P(m)) {
    mrb_value inspect = mrb_funcall(mrb, mrb_obj_value(c), "inspect", 0);

    if (mrb_string_p(inspect) && RSTRING_LEN(inspect) > 64) {
      inspect = mrb_any_to_s(mrb, mrb_obj_value(c));
    }
    mrb_name_error(mrb, mid, "undefined method '%S' for class %S",
                   mrb_sym2str(mrb, mid), inspect);
  }
  return m;
}
/* Generated getter body: reads the ivar whose name was captured in the
 * proc environment (slot 0) when attr_reader defined the method. */
static mrb_value
attr_reader(mrb_state *mrb, mrb_value obj)
{
  mrb_value name = mrb_proc_cfunc_env_get(mrb, 0);
  return mrb_iv_get(mrb, obj, to_sym(mrb, name));
}

/* Module#attr_reader: for each symbol/string argument defines a getter
 * method backed by the corresponding @-prefixed instance variable. */
static mrb_value
mrb_mod_attr_reader(mrb_state *mrb, mrb_value mod)
{
  struct RClass *c = mrb_class_ptr(mod);
  mrb_value *argv;
  mrb_int argc, i;
  int ai;

  mrb_get_args(mrb, "*", &argv, &argc);
  ai = mrb_gc_arena_save(mrb);
  for (i=0; i<argc; i++) {
    mrb_value name, str;
    mrb_sym method, sym;
    struct RProc *p;
    mrb_method_t m;

    method = to_sym(mrb, argv[i]);
    /* build "@name" and intern it as the backing ivar */
    name = mrb_sym2str(mrb, method);
    str = mrb_str_new_capa(mrb, RSTRING_LEN(name)+1);
    mrb_str_cat_lit(mrb, str, "@");
    mrb_str_cat_str(mrb, str, name);
    sym = mrb_intern_str(mrb, str);
    mrb_iv_check(mrb, sym);  /* validates the ivar name */
    name = mrb_symbol_value(sym);
    /* capture the ivar symbol in the proc environment */
    p = mrb_proc_new_cfunc_with_env(mrb, attr_reader, 1, &name);
    MRB_METHOD_FROM_PROC(m, p);
    mrb_define_method_raw(mrb, c, method, m);
    mrb_gc_arena_restore(mrb, ai);  /* release per-iteration temporaries */
  }
  return mrb_nil_value();
}

/* Generated setter body: writes the captured ivar and returns the
 * assigned value. */
static mrb_value
attr_writer(mrb_state *mrb, mrb_value obj)
{
  mrb_value name = mrb_proc_cfunc_env_get(mrb, 0);
  mrb_value val;

  mrb_get_args(mrb, "o", &val);
  mrb_iv_set(mrb, obj, to_sym(mrb, name), val);
  return val;
}

/* Module#attr_writer: for each symbol/string argument defines a
 * "name=" method writing the corresponding @-prefixed ivar. */
static mrb_value
mrb_mod_attr_writer(mrb_state *mrb, mrb_value mod)
{
  struct RClass *c = mrb_class_ptr(mod);
  mrb_value *argv;
  mrb_int argc, i;
  int ai;

  mrb_get_args(mrb, "*", &argv, &argc);
  ai = mrb_gc_arena_save(mrb);
  for (i=0; i<argc; i++) {
    mrb_value name, str, attr;
    mrb_sym method, sym;
    struct RProc *p;
    mrb_method_t m;

    method = to_sym(mrb, argv[i]);

    /* prepare iv name (@name) */
    name = mrb_sym2str(mrb, method);
    str = mrb_str_new_capa(mrb, RSTRING_LEN(name)+1);
    mrb_str_cat_lit(mrb, str, "@");
    mrb_str_cat_str(mrb, str, name);
    sym = mrb_intern_str(mrb, str);
    mrb_iv_check(mrb, sym);
    attr = mrb_symbol_value(sym);

    /* prepare method name (name=) */
    str = mrb_str_new_capa(mrb, RSTRING_LEN(str));
    mrb_str_cat_str(mrb, str, name);
    mrb_str_cat_lit(mrb, str, "=");
    method = mrb_intern_str(mrb, str);

    p = mrb_proc_new_cfunc_with_env(mrb, attr_writer, 1, &attr);
    MRB_METHOD_FROM_PROC(m, p);
    mrb_define_method_raw(mrb, c, method, m);
    mrb_gc_arena_restore(mrb, ai);
  }
  return mrb_nil_value();
}
/* Allocates an uninitialized instance of the class `cv`.  Singleton
 * classes cannot be instantiated, and classes whose instance type tag
 * is a non-allocatable primitive (<= MRB_TT_CPTR) are rejected. */
static mrb_value
mrb_instance_alloc(mrb_state *mrb, mrb_value cv)
{
  struct RClass *c = mrb_class_ptr(cv);
  struct RObject *o;
  enum mrb_vtype ttype = MRB_INSTANCE_TT(c);

  if (c->tt == MRB_TT_SCLASS)
    mrb_raise(mrb, E_TYPE_ERROR, "can't create instance of singleton class");

  if (ttype == 0) ttype = MRB_TT_OBJECT;  /* default instance type */
  if (ttype <= MRB_TT_CPTR) {
    mrb_raisef(mrb, E_TYPE_ERROR, "can't create instance of %S", cv);
  }
  o = (struct RObject*)mrb_obj_alloc(mrb, ttype, c);
  return mrb_obj_value(o);
}
/*
* call-seq:
* class.new(args, ...) -> obj
*
* Creates a new object of <i>class</i>'s class, then
* invokes that object's <code>initialize</code> method,
* passing it <i>args</i>. This is the method that ends
* up getting called whenever an object is constructed using
* `.new`.
*
*/
/* Class#new backend: allocates the instance and runs #initialize with
 * the given arguments and block.  A C-function initialize is invoked
 * directly (skipping the default no-op); a Ruby one goes through a
 * normal funcall so the block is visible. */
MRB_API mrb_value
mrb_instance_new(mrb_state *mrb, mrb_value cv)
{
  mrb_value obj, blk;
  mrb_value *argv;
  mrb_int argc;
  mrb_sym init;
  mrb_method_t m;

  mrb_get_args(mrb, "*&", &argv, &argc, &blk);
  obj = mrb_instance_alloc(mrb, cv);
  init = mrb_intern_lit(mrb, "initialize");
  m = mrb_method_search(mrb, mrb_class(mrb, obj), init);
  if (MRB_METHOD_CFUNC_P(m)) {
    mrb_func_t f = MRB_METHOD_CFUNC(m);
    if (f != mrb_bob_init) {  /* default initialize does nothing */
      f(mrb, obj);
    }
  }
  else {
    mrb_funcall_with_block(mrb, obj, init, argc, argv, blk);
  }

  return obj;
}

/* C API equivalent of Class#new: allocates and calls #initialize
 * (skipped when the class still uses the default no-op). */
MRB_API mrb_value
mrb_obj_new(mrb_state *mrb, struct RClass *c, mrb_int argc, const mrb_value *argv)
{
  mrb_value obj;
  mrb_sym mid;

  obj = mrb_instance_alloc(mrb, mrb_obj_value(c));
  mid = mrb_intern_lit(mrb, "initialize");
  if (!mrb_func_basic_p(mrb, obj, mid, mrb_bob_init)) {
    mrb_funcall_argv(mrb, obj, mid, argc, argv);
  }
  return obj;
}
/* Class#initialize: optionally evaluates the given block with the new
 * class as self (class_eval semantics).  The optional superclass
 * argument was already consumed by Class.new. */
static mrb_value
mrb_class_initialize(mrb_state *mrb, mrb_value c)
{
  mrb_value a, b;

  mrb_get_args(mrb, "|C&", &a, &b);
  if (!mrb_nil_p(b)) {
    mrb_yield_with_class(mrb, b, 1, &c, c, mrb_class_ptr(c));
  }
  return c;
}
/* Class.new([superclass]) { ... } -- create an anonymous class.
 * The superclass defaults to Object.  A user-overridden Class#initialize
 * (if any) runs first; the superclass's `inherited` hook fires afterward. */
static mrb_value
mrb_class_new_class(mrb_state *mrb, mrb_value cv)
{
  mrb_int n;
  mrb_value super, blk;
  mrb_value new_class;
  mrb_sym mid;

  n = mrb_get_args(mrb, "|C&", &super, &blk);
  if (n == 0) {
    /* No explicit superclass given: default to Object. */
    super = mrb_obj_value(mrb->object_class);
  }
  new_class = mrb_obj_value(mrb_class_new(mrb, mrb_class_ptr(super)));
  mid = mrb_intern_lit(mrb, "initialize");
  if (!mrb_func_basic_p(mrb, new_class, mid, mrb_bob_init)) {
    /* Only dispatch initialize when it was overridden; the default is a
     * no-op, and n (0 or 1) doubles as the argument count for super. */
    mrb_funcall_with_block(mrb, new_class, mid, n, &super, blk);
  }
  mrb_class_inherited(mrb, mrb_class_ptr(super), mrb_class_ptr(new_class));
  return new_class;
}
/* Class#superclass: skip include wrappers (ICLASS entries) in the super
 * chain and return the first real superclass, or nil when there is none. */
static mrb_value
mrb_class_superclass(mrb_state *mrb, mrb_value klass)
{
  struct RClass *sup = find_origin(mrb_class_ptr(klass))->super;

  while (sup != NULL && sup->tt == MRB_TT_ICLASS) {
    sup = find_origin(sup)->super;
  }
  return sup ? mrb_obj_value(sup) : mrb_nil_value();
}
/* BasicObject#initialize: a deliberate no-op (also used as a sentinel to
 * detect "initialize was not overridden"). */
static mrb_value
mrb_bob_init(mrb_state *mrb, mrb_value cv)
{
  (void)mrb;
  (void)cv;
  return mrb_nil_value();
}

/* BasicObject#!: logical negation of the receiver's truthiness. */
static mrb_value
mrb_bob_not(mrb_state *mrb, mrb_value cv)
{
  (void)mrb;
  return mrb_bool_value(mrb_test(cv) ? FALSE : TRUE);
}
/* 15.3.1.3.1 */
/* 15.3.1.3.10 */
/* 15.3.1.3.11 */
/*
* call-seq:
* obj == other -> true or false
* obj.equal?(other) -> true or false
* obj.eql?(other) -> true or false
*
* Equality---At the <code>Object</code> level, <code>==</code> returns
* <code>true</code> only if <i>obj</i> and <i>other</i> are the
* same object. Typically, this method is overridden in descendant
* classes to provide class-specific meaning.
*
* Unlike <code>==</code>, the <code>equal?</code> method should never be
* overridden by subclasses: it is used to determine object identity
* (that is, <code>a.equal?(b)</code> iff <code>a</code> is the same
* object as <code>b</code>).
*
* The <code>eql?</code> method returns <code>true</code> if
* <i>obj</i> and <i>anObject</i> have the same value. Used by
* <code>Hash</code> to test members for equality. For objects of
* class <code>Object</code>, <code>eql?</code> is synonymous with
* <code>==</code>. Subclasses normally continue this tradition, but
* there are exceptions. <code>Numeric</code> types, for example,
* perform type conversion across <code>==</code>, but not across
* <code>eql?</code>, so:
*
* 1 == 1.0 #=> true
* 1.eql? 1.0 #=> false
*/
/* BasicObject#== / #equal? / #eql?: identity comparison at this level. */
mrb_value
mrb_obj_equal_m(mrb_state *mrb, mrb_value self)
{
  mrb_value other;

  mrb_get_args(mrb, "o", &other);
  return mrb_bool_value(mrb_obj_equal(mrb, self, other));
}

/* BasicObject#!=: negation of the (possibly overridden) == result. */
static mrb_value
mrb_obj_not_equal_m(mrb_state *mrb, mrb_value self)
{
  mrb_value other;

  mrb_get_args(mrb, "o", &other);
  return mrb_bool_value(mrb_equal(mrb, self, other) ? FALSE : TRUE);
}
/* TRUE when class c (or an ancestor) has a live method entry for mid. */
MRB_API mrb_bool
mrb_obj_respond_to(mrb_state *mrb, struct RClass* c, mrb_sym mid)
{
  mrb_method_t found = mrb_method_search_vm(mrb, &c, mid);

  return MRB_METHOD_UNDEF_P(found) ? FALSE : TRUE;
}

/* Convenience wrapper keyed off an object's class. */
MRB_API mrb_bool
mrb_respond_to(mrb_state *mrb, mrb_value obj, mrb_sym mid)
{
  return mrb_obj_respond_to(mrb, mrb_class(mrb, obj), mid);
}
/* Return the (possibly qualified) name of c as a string, or nil when the
 * class is anonymous and cannot be located from the root namespace.
 * The cached name lives in the hidden ivar __classname__: a symbol for a
 * toplevel class/module, a string for a nested path. */
MRB_API mrb_value
mrb_class_path(mrb_state *mrb, struct RClass *c)
{
  mrb_value path;
  mrb_sym nsym = mrb_intern_lit(mrb, "__classname__");

  path = mrb_obj_iv_get(mrb, (struct RObject*)c, nsym);
  if (mrb_nil_p(path)) {
    /* no name (yet) -- search the constant tree for this class */
    return mrb_class_find_path(mrb, c);
  }
  else if (mrb_symbol_p(path)) {
    /* toplevel class/module: materialize the symbol as a string */
    const char *str;
    mrb_int len;

    str = mrb_sym2name_len(mrb, mrb_symbol(path), &len);
    return mrb_str_new(mrb, str, len);
  }
  /* Cached string path: duplicate so the caller may mutate freely. */
  return mrb_str_dup(mrb, path);
}
/* Strip singleton (SCLASS) and include wrappers (ICLASS), returning the
 * first "real" class/module in the super chain; NULL maps to NULL. */
MRB_API struct RClass*
mrb_class_real(struct RClass* cl)
{
  if (cl == 0)
    return NULL;
  for (; cl->tt == MRB_TT_SCLASS || cl->tt == MRB_TT_ICLASS; cl = cl->super)
    ;
  return cl;
}
/* Human-readable name of class c; anonymous classes are rendered in the
 * "#<Class:0x...>" form.
 * NOTE(review): the returned pointer aliases the buffer of an mrb string
 * object; presumably it is only valid while that object stays reachable
 * by the GC -- confirm before retaining it. */
MRB_API const char*
mrb_class_name(mrb_state *mrb, struct RClass* c)
{
  mrb_value path = mrb_class_path(mrb, c);

  if (mrb_nil_p(path)) {
    path = mrb_str_new_lit(mrb, "#<Class:");
    mrb_str_concat(mrb, path, mrb_ptr_to_str(mrb, c));
    mrb_str_cat_lit(mrb, path, ">");
  }
  return RSTRING_PTR(path);
}
/* Name of obj's real class (see mrb_class_name for pointer lifetime). */
MRB_API const char*
mrb_obj_classname(mrb_state *mrb, mrb_value obj)
{
  struct RClass *real = mrb_obj_class(mrb, obj);

  return mrb_class_name(mrb, real);
}
/*!
* Ensures a class can be derived from super.
*
* \param super a reference to an object.
* \exception TypeError if \a super is not a Class or \a super is a singleton class.
*/
/*!
 * Ensures a class can be derived from super.
 *
 * \param super a reference to an object.
 * \exception TypeError if \a super is not a Class, is a singleton class,
 *            or is the Class class itself.
 *
 * Fix: the singleton-class check must precede the generic "must be a
 * Class" check.  In the original ordering the SCLASS branch was dead code
 * (any non-CLASS tag, including SCLASS, had already raised), so the more
 * specific "subclass of singleton class" message could never be produced.
 */
static void
mrb_check_inheritable(mrb_state *mrb, struct RClass *super)
{
  if (super->tt == MRB_TT_SCLASS) {
    mrb_raise(mrb, E_TYPE_ERROR, "can't make subclass of singleton class");
  }
  if (super->tt != MRB_TT_CLASS) {
    mrb_raisef(mrb, E_TYPE_ERROR, "superclass must be a Class (%S given)", mrb_obj_value(super));
  }
  if (super == mrb->class_class) {
    mrb_raise(mrb, E_TYPE_ERROR, "can't make subclass of Class");
  }
}
/*!
* Creates a new class.
* \param super a class from which the new class derives.
* \exception TypeError \a super is not inheritable.
* \exception TypeError \a super is the Class class.
*/
/* Create a new class deriving from super (may be NULL for a boot class);
 * the instance type tag is inherited and a metaclass is attached. */
MRB_API struct RClass*
mrb_class_new(mrb_state *mrb, struct RClass *super)
{
  struct RClass *created;

  if (super != NULL) {
    mrb_check_inheritable(mrb, super);
  }
  created = boot_defclass(mrb, super);
  if (super != NULL) {
    MRB_SET_INSTANCE_TT(created, MRB_INSTANCE_TT(super));
  }
  make_metaclass(mrb, created);
  return created;
}
/*!
* Creates a new module.
*/
/* Create a fresh anonymous module. */
MRB_API struct RClass*
mrb_module_new(mrb_state *mrb)
{
  struct RClass *mod;

  mod = (struct RClass*)mrb_obj_alloc(mrb, MRB_TT_MODULE, mrb->module_class);
  boot_initmod(mrb, mod);
  return mod;
}
/*
* call-seq:
* obj.class => class
*
* Returns the class of <i>obj</i>, now preferred over
* <code>Object#type</code>, as an object's type in Ruby is only
* loosely tied to that object's class. This method must always be
* called with an explicit receiver, as <code>class</code> is also a
* reserved word in Ruby.
*
* 1.class #=> Fixnum
* self.class #=> Object
*/
/* Real (non-singleton, non-ICLASS) class of obj. */
MRB_API struct RClass*
mrb_obj_class(mrb_state *mrb, mrb_value obj)
{
  struct RClass *cls = mrb_class(mrb, obj);

  return mrb_class_real(cls);
}
/* Bind the method currently registered under b in c to the extra name a. */
MRB_API void
mrb_alias_method(mrb_state *mrb, struct RClass *c, mrb_sym a, mrb_sym b)
{
  mrb_method_t target = mrb_method_search(mrb, c, b);

  mrb_define_method_raw(mrb, c, a, target);
}
/*!
* Defines an alias of a method.
* \param klass the class which the original method belongs to
* \param name1 a new name for the method
* \param name2 the original name of the method
*/
/* C-string flavor of mrb_alias_method: name1 becomes an alias of name2. */
MRB_API void
mrb_define_alias(mrb_state *mrb, struct RClass *klass, const char *name1, const char *name2)
{
  mrb_sym alias_sym = mrb_intern_cstr(mrb, name1);
  mrb_sym orig_sym = mrb_intern_cstr(mrb, name2);

  mrb_alias_method(mrb, klass, alias_sym, orig_sym);
}
/*
* call-seq:
* mod.to_s -> string
*
* Return a string representing this module or class. For basic
* classes and modules, this is the name. For singletons, we
* show information on the thing we're attached to as well.
*/
/* Module#to_s / #inspect.  Singleton classes render as
 * "#<Class:<attached>>"; named classes/modules print their path; anonymous
 * ones fall back to "#<Class:0x...>" / "#<Module:0x...>". */
static mrb_value
mrb_mod_to_s(mrb_state *mrb, mrb_value klass)
{
  mrb_value str;

  if (mrb_type(klass) == MRB_TT_SCLASS) {
    /* Singleton class: describe the object it is attached to. */
    mrb_value v = mrb_iv_get(mrb, klass, mrb_intern_lit(mrb, "__attached__"));

    str = mrb_str_new_lit(mrb, "#<Class:");
    if (class_ptr_p(v)) {
      /* Attached to a class/module: its inspect form is its name. */
      mrb_str_cat_str(mrb, str, mrb_inspect(mrb, v));
    }
    else {
      /* Attached to a plain object: generic address-based form. */
      mrb_str_cat_str(mrb, str, mrb_any_to_s(mrb, v));
    }
    return mrb_str_cat_lit(mrb, str, ">");
  }
  else {
    struct RClass *c;
    mrb_value path;

    str = mrb_str_new_capa(mrb, 32);
    c = mrb_class_ptr(klass);
    path = mrb_class_path(mrb, c);
    if (mrb_nil_p(path)) {
      /* Anonymous: identify by type tag and address. */
      switch (mrb_type(klass)) {
        case MRB_TT_CLASS:
          mrb_str_cat_lit(mrb, str, "#<Class:");
          break;

        case MRB_TT_MODULE:
          mrb_str_cat_lit(mrb, str, "#<Module:");
          break;

        default:
          /* Shouldn't happen -- defensive fallback. */
          mrb_str_cat_lit(mrb, str, "#<??????:");
          break;
      }
      mrb_str_concat(mrb, str, mrb_ptr_to_str(mrb, c));
      return mrb_str_cat_lit(mrb, str, ">");
    }
    else {
      return path;
    }
  }
}
/* Module#alias_method(new_name, old_name) -> nil. */
static mrb_value
mrb_mod_alias(mrb_state *mrb, mrb_value mod)
{
  mrb_sym fresh, existing;

  mrb_get_args(mrb, "nn", &fresh, &existing);
  mrb_alias_method(mrb, mrb_class_ptr(mod), fresh, existing);
  return mrb_nil_value();
}
/* Mark mid as undefined on c (a NULL proc entry in the method table),
 * raising NameError when no such method is currently visible. */
static void
undef_method(mrb_state *mrb, struct RClass *c, mrb_sym a)
{
  if (mrb_obj_respond_to(mrb, c, a)) {
    mrb_method_t cleared;

    MRB_METHOD_FROM_PROC(cleared, NULL);
    mrb_define_method_raw(mrb, c, a, cleared);
  }
  else {
    mrb_name_error(mrb, a, "undefined method '%S' for class '%S'", mrb_sym2str(mrb, a), mrb_obj_value(c));
  }
}
/* C-string flavor of undef_method. */
MRB_API void
mrb_undef_method(mrb_state *mrb, struct RClass *c, const char *name)
{
  mrb_sym sym = mrb_intern_cstr(mrb, name);

  undef_method(mrb, c, sym);
}

/* Undefine a class-level (singleton) method of c by name. */
MRB_API void
mrb_undef_class_method(mrb_state *mrb, struct RClass *c, const char *name)
{
  mrb_value singleton = mrb_singleton_class(mrb, mrb_obj_value(c));

  mrb_undef_method(mrb, mrb_class_ptr(singleton), name);
}
/* Module#undef_method(*names) -> nil; each name may be symbol or string. */
static mrb_value
mrb_mod_undef(mrb_state *mrb, mrb_value mod)
{
  struct RClass *c = mrb_class_ptr(mod);
  mrb_value *names;
  mrb_int count, i;

  mrb_get_args(mrb, "*", &names, &count);
  for (i = 0; i < count; i++) {
    undef_method(mrb, c, to_sym(mrb, names[i]));
  }
  return mrb_nil_value();
}
/* Module#define_method(name) { ... } or define_method(name, proc).
 * The body proc is copied and flagged strict so it behaves like a method
 * (own argument handling; `return` exits the method). Returns the name. */
static mrb_value
mod_define_method(mrb_state *mrb, mrb_value self)
{
  struct RClass *c = mrb_class_ptr(self);
  struct RProc *p;
  mrb_method_t m;
  mrb_sym mid;
  mrb_value proc = mrb_undef_value();
  mrb_value blk;

  mrb_get_args(mrb, "n|o&", &mid, &proc, &blk);
  switch (mrb_type(proc)) {
    case MRB_TT_PROC:
      /* An explicit proc argument takes the role of the body. */
      blk = proc;
      break;
    case MRB_TT_UNDEF:
      /* ignored -- no positional proc was given */
      break;
    default:
      mrb_raisef(mrb, E_TYPE_ERROR, "wrong argument type %S (expected Proc)", mrb_obj_value(mrb_obj_class(mrb, proc)));
      break;
  }
  if (mrb_nil_p(blk)) {
    mrb_raise(mrb, E_ARGUMENT_ERROR, "no block given");
  }
  /* Copy the proc so the flag mutation below can't affect the caller's. */
  p = (struct RProc*)mrb_obj_alloc(mrb, MRB_TT_PROC, mrb->proc_class);
  mrb_proc_copy(p, mrb_proc_ptr(blk));
  p->flags |= MRB_PROC_STRICT;
  MRB_METHOD_FROM_PROC(m, p);
  mrb_define_method_raw(mrb, c, mid, m);
  return mrb_symbol_value(mid);
}
/* define_method at the top level defines the method on Object. */
static mrb_value
top_define_method(mrb_state *mrb, mrb_value self)
{
  mrb_value object_cls = mrb_obj_value(mrb->object_class);

  (void)self;
  return mod_define_method(mrb, object_cls);
}
/* Raise NameError unless str is a valid class-variable name ("@@x..."). */
static void
check_cv_name_str(mrb_state *mrb, mrb_value str)
{
  const char *name = RSTRING_PTR(str);
  mrb_int n = RSTRING_LEN(str);

  if (n < 3 || name[0] != '@' || name[1] != '@') {
    mrb_name_error(mrb, mrb_intern_str(mrb, str), "'%S' is not allowed as a class variable name", str);
  }
}

/* Symbol flavor of the class-variable name check. */
static void
check_cv_name_sym(mrb_state *mrb, mrb_sym id)
{
  mrb_value name_str = mrb_sym2str(mrb, id);

  check_cv_name_str(mrb, name_str);
}
/* 15.2.2.4.16 */
/*
* call-seq:
* obj.class_variable_defined?(symbol) -> true or false
*
* Returns <code>true</code> if the given class variable is defined
* in <i>obj</i>.
*
* class Fred
* @@foo = 99
* end
* Fred.class_variable_defined?(:@@foo) #=> true
* Fred.class_variable_defined?(:@@bar) #=> false
*/
/* Module#class_variable_defined?(sym) -> true or false. */
static mrb_value
mrb_mod_cvar_defined(mrb_state *mrb, mrb_value mod)
{
  mrb_sym cv_name;
  mrb_bool present;

  mrb_get_args(mrb, "n", &cv_name);
  check_cv_name_sym(mrb, cv_name);
  present = mrb_cv_defined(mrb, mod, cv_name);
  return mrb_bool_value(present);
}
/* 15.2.2.4.17 */
/*
* call-seq:
* mod.class_variable_get(symbol) -> obj
*
* Returns the value of the given class variable (or throws a
* <code>NameError</code> exception). The <code>@@</code> part of the
* variable name should be included for regular class variables
*
* class Fred
* @@foo = 99
* end
* Fred.class_variable_get(:@@foo) #=> 99
*/
/* Module#class_variable_get(sym) -> value (NameError if undefined). */
static mrb_value
mrb_mod_cvar_get(mrb_state *mrb, mrb_value mod)
{
  mrb_sym cv_name;

  mrb_get_args(mrb, "n", &cv_name);
  check_cv_name_sym(mrb, cv_name);
  return mrb_cv_get(mrb, mod, cv_name);
}
/* 15.2.2.4.18 */
/*
* call-seq:
* obj.class_variable_set(symbol, obj) -> obj
*
* Sets the class variable names by <i>symbol</i> to
* <i>object</i>.
*
* class Fred
* @@foo = 99
* def foo
* @@foo
* end
* end
* Fred.class_variable_set(:@@foo, 101) #=> 101
* Fred.new.foo #=> 101
*/
/* Module#class_variable_set(sym, value) -> value. */
static mrb_value
mrb_mod_cvar_set(mrb_state *mrb, mrb_value mod)
{
  mrb_sym cv_name;
  mrb_value new_val;

  mrb_get_args(mrb, "no", &cv_name, &new_val);
  check_cv_name_sym(mrb, cv_name);
  mrb_cv_set(mrb, mod, cv_name, new_val);
  return new_val;
}
/* 15.2.2.4.39 */
/*
* call-seq:
* remove_class_variable(sym) -> obj
*
* Removes the definition of the <i>sym</i>, returning that
* constant's value.
*
* class Dummy
* @@var = 99
* puts @@var
* p class_variables
* remove_class_variable(:@@var)
* p class_variables
* end
*
* <em>produces:</em>
*
* 99
* [:@@var]
* []
*/
/* Module#remove_class_variable(sym): delete a class variable stored
 * directly on this module and return its value.  Raises NameError when
 * the variable only exists on an ancestor (not removable here) or does
 * not exist at all. */
static mrb_value
mrb_mod_remove_cvar(mrb_state *mrb, mrb_value mod)
{
  mrb_value val;
  mrb_sym id;

  mrb_get_args(mrb, "n", &id);
  check_cv_name_sym(mrb, id);

  /* Probe the receiver's own table first; undef signals "not present". */
  val = mrb_iv_remove(mrb, mod, id);
  if (!mrb_undef_p(val)) return val;

  /* Still visible through an ancestor: defined, but not removable here. */
  if (mrb_cv_defined(mrb, mod, id)) {
    mrb_name_error(mrb, id, "cannot remove %S for %S",
                   mrb_sym2str(mrb, id), mod);
  }
  mrb_name_error(mrb, id, "class variable %S not defined for %S",
                 mrb_sym2str(mrb, id), mod);
  /* not reached */
  return mrb_nil_value();
}
/* 15.2.2.4.34 */
/*
* call-seq:
* mod.method_defined?(symbol) -> true or false
*
* Returns +true+ if the named method is defined by
* _mod_ (or its included modules and, if _mod_ is a class,
* its ancestors). Public and protected methods are matched.
*
* module A
* def method1() end
* end
* class B
* def method2() end
* end
* class C < B
* include A
* def method3() end
* end
*
* A.method_defined? :method1 #=> true
* C.method_defined? "method1" #=> true
* C.method_defined? "method2" #=> true
* C.method_defined? "method3" #=> true
* C.method_defined? "method4" #=> false
*/
/* Module#method_defined?(sym) -> true or false (searches ancestors). */
static mrb_value
mrb_mod_method_defined(mrb_state *mrb, mrb_value mod)
{
  mrb_sym mid;

  mrb_get_args(mrb, "n", &mid);
  return mrb_bool_value(mrb_obj_respond_to(mrb, mrb_class_ptr(mod), mid));
}
/* Delete mid from the class's own method table (ancestors untouched) and
 * fire the method_removed hook; NameError when the class itself has no
 * such entry. */
static void
remove_method(mrb_state *mrb, mrb_value mod, mrb_sym mid)
{
  struct RClass *c = mrb_class_ptr(mod);
  khash_t(mt) *h = find_origin(c)->mt;
  khiter_t k;

  if (h) {
    k = kh_get(mt, mrb, h, mid);
    if (k != kh_end(h)) {
      /* Found in this class's own table: remove and notify. */
      kh_del(mt, mrb, h, k);
      mrb_funcall(mrb, mod, "method_removed", 1, mrb_symbol_value(mid));
      return;
    }
  }
  mrb_name_error(mrb, mid, "method '%S' not defined in %S",
                 mrb_sym2str(mrb, mid), mod);
}
/* 15.2.2.4.41 */
/*
* call-seq:
* remove_method(symbol) -> self
*
* Removes the method identified by _symbol_ from the current
* class. For an example, see <code>Module.undef_method</code>.
*/
/* Module#remove_method(*names) -> self. */
static mrb_value
mrb_mod_remove_method(mrb_state *mrb, mrb_value mod)
{
  mrb_value *names;
  mrb_int count, i;

  mrb_get_args(mrb, "*", &names, &count);
  for (i = 0; i < count; i++) {
    remove_method(mrb, mod, to_sym(mrb, names[i]));
  }
  return mod;
}
/* Raise NameError unless str starts with an uppercase letter. */
static void
check_const_name_str(mrb_state *mrb, mrb_value str)
{
  mrb_bool valid = RSTRING_LEN(str) >= 1 && ISUPPER(*RSTRING_PTR(str));

  if (!valid) {
    mrb_name_error(mrb, mrb_intern_str(mrb, str), "wrong constant name %S", str);
  }
}

/* Symbol flavor of the constant-name check. */
static void
check_const_name_sym(mrb_state *mrb, mrb_sym id)
{
  mrb_value name_str = mrb_sym2str(mrb, id);

  check_const_name_str(mrb, name_str);
}
/* Constant lookup predicate: with inherit, search ancestors too;
 * otherwise only the module's own constant table. */
static mrb_value
const_defined(mrb_state *mrb, mrb_value mod, mrb_sym id, mrb_bool inherit)
{
  mrb_bool defined = inherit ? mrb_const_defined(mrb, mod, id)
                             : mrb_const_defined_at(mrb, mod, id);

  return mrb_bool_value(defined);
}
/* Module#const_defined?(sym [, inherit]) -> true or false. */
static mrb_value
mrb_mod_const_defined(mrb_state *mrb, mrb_value mod)
{
  mrb_sym name;
  mrb_bool search_ancestors = TRUE;

  mrb_get_args(mrb, "n|b", &name, &search_ancestors);
  check_const_name_sym(mrb, name);
  return const_defined(mrb, mod, name, search_ancestors);
}
/* Validate the constant name, then look it up on mod. */
static mrb_value
mrb_const_get_sym(mrb_state *mrb, mrb_value mod, mrb_sym id)
{
  check_const_name_sym(mrb, id);
  return mrb_const_get(mrb, mod, id);
}

/* Module#const_get.  Accepts either a symbol or a string path such as
 * "Foo::Bar"; for a path, each segment is resolved against the result of
 * the previous lookup. */
static mrb_value
mrb_mod_const_get(mrb_state *mrb, mrb_value mod)
{
  mrb_value path;
  mrb_sym id;
  char *ptr;
  mrb_int off, end, len;

  mrb_get_args(mrb, "o", &path);
  if (mrb_symbol_p(path)) {
    /* const get with symbol */
    id = mrb_symbol(path);
    return mrb_const_get_sym(mrb, mod, id);
  }

  /* const get with class path string */
  path = mrb_string_type(mrb, path);
  ptr = RSTRING_PTR(path);
  len = RSTRING_LEN(path);
  off = 0;
  while (off < len) {
    /* Each segment runs up to the next "::" (or the end of the string). */
    end = mrb_str_index_lit(mrb, path, "::", off);
    end = (end == -1) ? len : end;
    id = mrb_intern(mrb, ptr+off, end-off);
    mod = mrb_const_get_sym(mrb, mod, id);
    off = (end == len) ? end : end+2;
  }
  return mod;
}
/* Module#const_set(sym, value) -> value. */
static mrb_value
mrb_mod_const_set(mrb_state *mrb, mrb_value mod)
{
  mrb_sym name;
  mrb_value val;

  mrb_get_args(mrb, "no", &name, &val);
  check_const_name_sym(mrb, name);
  mrb_const_set(mrb, mod, name, val);
  return val;
}
/* Module#remove_const(sym) -> removed value (NameError if absent). */
static mrb_value
mrb_mod_remove_const(mrb_state *mrb, mrb_value mod)
{
  mrb_sym name;
  mrb_value removed;

  mrb_get_args(mrb, "n", &name);
  check_const_name_sym(mrb, name);
  removed = mrb_iv_remove(mrb, mod, name);
  if (mrb_undef_p(removed)) {
    mrb_name_error(mrb, name, "constant %S not defined", mrb_sym2str(mrb, name));
  }
  return removed;
}
/* Default const_missing hook: raise NameError naming the constant,
 * qualified by the module unless the receiver is Object itself. */
static mrb_value
mrb_mod_const_missing(mrb_state *mrb, mrb_value mod)
{
  mrb_sym sym;

  mrb_get_args(mrb, "n", &sym);

  if (mrb_class_real(mrb_class_ptr(mod)) != mrb->object_class) {
    mrb_name_error(mrb, sym, "uninitialized constant %S::%S",
                   mod,
                   mrb_sym2str(mrb, sym));
  }
  else {
    mrb_name_error(mrb, sym, "uninitialized constant %S",
                   mrb_sym2str(mrb, sym));
  }
  /* not reached */
  return mrb_nil_value();
}
/* Module.constants (class method) is not provided by this core. */
static mrb_value
mrb_mod_s_constants(mrb_state *mrb, mrb_value mod)
{
  (void)mod;
  mrb_raise(mrb, E_NOTIMP_ERROR, "Module.constants not implemented");
  return mrb_nil_value(); /* not reached */
}
/* Module#===: true when the argument is an instance of this class/module. */
static mrb_value
mrb_mod_eqq(mrb_state *mrb, mrb_value mod)
{
  mrb_value candidate;

  mrb_get_args(mrb, "o", &candidate);
  return mrb_bool_value(mrb_obj_is_kind_of(mrb, candidate, mrb_class_ptr(mod)));
}
/* Module#module_function.  With no arguments the scope-switching form is
 * not implemented and self is returned unchanged; with symbol arguments,
 * each named instance method is copied onto the module's singleton class
 * so it is callable both as Mod.m and via include. */
MRB_API mrb_value
mrb_mod_module_function(mrb_state *mrb, mrb_value mod)
{
  mrb_value *argv;
  mrb_int argc, i;
  mrb_sym mid;
  mrb_method_t m;
  struct RClass *rclass;
  int ai;

  mrb_check_type(mrb, mod, MRB_TT_MODULE);

  mrb_get_args(mrb, "*", &argv, &argc);
  if (argc == 0) {
    /* set MODFUNC SCOPE if implemented */
    return mod;
  }

  /* set PRIVATE method visibility if implemented */
  /* mrb_mod_dummy_visibility(mrb, mod); */

  for (i=0; i<argc; i++) {
    mrb_check_type(mrb, argv[i], MRB_TT_SYMBOL);

    mid = mrb_symbol(argv[i]);
    rclass = mrb_class_ptr(mod);
    m = mrb_method_search(mrb, rclass, mid);

    /* Ensure the singleton class exists, then copy the method onto it. */
    prepare_singleton_class(mrb, (struct RBasic*)rclass);
    ai = mrb_gc_arena_save(mrb);
    mrb_define_method_raw(mrb, rclass->c, mid, m);
    mrb_gc_arena_restore(mrb, ai);
  }

  return mod;
}
/* implementation of __id__ */
mrb_value mrb_obj_id_m(mrb_state *mrb, mrb_value self);
/* implementation of instance_eval */
mrb_value mrb_obj_instance_eval(mrb_state*, mrb_value);
/* implementation of Module.nesting */
mrb_value mrb_mod_s_nesting(mrb_state*, mrb_value);
/* inspect/to_s for the top-level "main" object. */
static mrb_value
inspect_main(mrb_state *mrb, mrb_value mod)
{
  (void)mod;
  return mrb_str_new_lit(mrb, "main");
}
/* Boot-strap the core class hierarchy (BasicObject < Object < Module <
 * Class), wire up metaclasses, and register all C-implemented methods of
 * BasicObject, Class, and Module.  Numeric comments cite ISO/IEC 30170. */
void
mrb_init_class(mrb_state *mrb)
{
  struct RClass *bob; /* BasicObject */
  struct RClass *obj; /* Object */
  struct RClass *mod; /* Module */
  struct RClass *cls; /* Class */

  /* boot class hierarchy */
  bob = boot_defclass(mrb, 0);
  obj = boot_defclass(mrb, bob); mrb->object_class = obj;
  mod = boot_defclass(mrb, obj); mrb->module_class = mod;/* obj -> mod */
  cls = boot_defclass(mrb, mod); mrb->class_class = cls; /* obj -> cls */
  /* fix-up loose ends: every boot class's class is Class itself */
  bob->c = obj->c = mod->c = cls->c = cls;
  make_metaclass(mrb, bob);
  make_metaclass(mrb, obj);
  make_metaclass(mrb, mod);
  make_metaclass(mrb, cls);

  /* name basic classes */
  mrb_define_const(mrb, bob, "BasicObject", mrb_obj_value(bob));
  mrb_define_const(mrb, obj, "BasicObject", mrb_obj_value(bob));
  mrb_define_const(mrb, obj, "Object", mrb_obj_value(obj));
  mrb_define_const(mrb, obj, "Module", mrb_obj_value(mod));
  mrb_define_const(mrb, obj, "Class", mrb_obj_value(cls));

  /* name each classes */
  mrb_class_name_class(mrb, NULL, bob, mrb_intern_lit(mrb, "BasicObject"));
  mrb_class_name_class(mrb, NULL, obj, mrb_intern_lit(mrb, "Object")); /* 15.2.1 */
  mrb_class_name_class(mrb, NULL, mod, mrb_intern_lit(mrb, "Module")); /* 15.2.2 */
  mrb_class_name_class(mrb, NULL, cls, mrb_intern_lit(mrb, "Class")); /* 15.2.3 */

  mrb->proc_class = mrb_define_class(mrb, "Proc", mrb->object_class); /* 15.2.17 */
  MRB_SET_INSTANCE_TT(mrb->proc_class, MRB_TT_PROC);

  MRB_SET_INSTANCE_TT(cls, MRB_TT_CLASS);
  /* BasicObject methods */
  mrb_define_method(mrb, bob, "initialize", mrb_bob_init, MRB_ARGS_NONE());
  mrb_define_method(mrb, bob, "!", mrb_bob_not, MRB_ARGS_NONE());
  mrb_define_method(mrb, bob, "==", mrb_obj_equal_m, MRB_ARGS_REQ(1)); /* 15.3.1.3.1 */
  mrb_define_method(mrb, bob, "!=", mrb_obj_not_equal_m, MRB_ARGS_REQ(1));
  mrb_define_method(mrb, bob, "__id__", mrb_obj_id_m, MRB_ARGS_NONE()); /* 15.3.1.3.3 */
  mrb_define_method(mrb, bob, "__send__", mrb_f_send, MRB_ARGS_ANY()); /* 15.3.1.3.4 */
  mrb_define_method(mrb, bob, "instance_eval", mrb_obj_instance_eval, MRB_ARGS_ANY()); /* 15.3.1.3.18 */

  /* Class methods */
  mrb_define_class_method(mrb, cls, "new", mrb_class_new_class, MRB_ARGS_OPT(1));
  mrb_define_method(mrb, cls, "superclass", mrb_class_superclass, MRB_ARGS_NONE()); /* 15.2.3.3.4 */
  mrb_define_method(mrb, cls, "new", mrb_instance_new, MRB_ARGS_ANY()); /* 15.2.3.3.3 */
  mrb_define_method(mrb, cls, "initialize", mrb_class_initialize, MRB_ARGS_OPT(1)); /* 15.2.3.3.1 */
  mrb_define_method(mrb, cls, "inherited", mrb_bob_init, MRB_ARGS_REQ(1));

  /* Module methods */
  MRB_SET_INSTANCE_TT(mod, MRB_TT_MODULE);
  mrb_define_method(mrb, mod, "class_variable_defined?", mrb_mod_cvar_defined, MRB_ARGS_REQ(1)); /* 15.2.2.4.16 */
  mrb_define_method(mrb, mod, "class_variable_get", mrb_mod_cvar_get, MRB_ARGS_REQ(1)); /* 15.2.2.4.17 */
  mrb_define_method(mrb, mod, "class_variable_set", mrb_mod_cvar_set, MRB_ARGS_REQ(2)); /* 15.2.2.4.18 */
  mrb_define_method(mrb, mod, "extend_object", mrb_mod_extend_object, MRB_ARGS_REQ(1)); /* 15.2.2.4.25 */
  mrb_define_method(mrb, mod, "extended", mrb_bob_init, MRB_ARGS_REQ(1)); /* 15.2.2.4.26 */
  mrb_define_method(mrb, mod, "prepended", mrb_bob_init, MRB_ARGS_REQ(1));
  mrb_define_method(mrb, mod, "prepend_features", mrb_mod_prepend_features, MRB_ARGS_REQ(1));
  mrb_define_method(mrb, mod, "include?", mrb_mod_include_p, MRB_ARGS_REQ(1)); /* 15.2.2.4.28 */
  mrb_define_method(mrb, mod, "append_features", mrb_mod_append_features, MRB_ARGS_REQ(1)); /* 15.2.2.4.10 */
  mrb_define_method(mrb, mod, "class_eval", mrb_mod_module_eval, MRB_ARGS_ANY()); /* 15.2.2.4.15 */
  mrb_define_method(mrb, mod, "included", mrb_bob_init, MRB_ARGS_REQ(1)); /* 15.2.2.4.29 */
  mrb_define_method(mrb, mod, "included_modules", mrb_mod_included_modules, MRB_ARGS_NONE()); /* 15.2.2.4.30 */
  mrb_define_method(mrb, mod, "initialize", mrb_mod_initialize, MRB_ARGS_NONE()); /* 15.2.2.4.31 */
  mrb_define_method(mrb, mod, "instance_methods", mrb_mod_instance_methods, MRB_ARGS_ANY()); /* 15.2.2.4.33 */
  mrb_define_method(mrb, mod, "method_defined?", mrb_mod_method_defined, MRB_ARGS_REQ(1)); /* 15.2.2.4.34 */
  mrb_define_method(mrb, mod, "module_eval", mrb_mod_module_eval, MRB_ARGS_ANY()); /* 15.2.2.4.35 */
  mrb_define_method(mrb, mod, "module_function", mrb_mod_module_function, MRB_ARGS_ANY());
  mrb_define_method(mrb, mod, "private", mrb_mod_dummy_visibility, MRB_ARGS_ANY()); /* 15.2.2.4.36 */
  mrb_define_method(mrb, mod, "protected", mrb_mod_dummy_visibility, MRB_ARGS_ANY()); /* 15.2.2.4.37 */
  mrb_define_method(mrb, mod, "public", mrb_mod_dummy_visibility, MRB_ARGS_ANY()); /* 15.2.2.4.38 */
  mrb_define_method(mrb, mod, "remove_class_variable", mrb_mod_remove_cvar, MRB_ARGS_REQ(1)); /* 15.2.2.4.39 */
  mrb_define_method(mrb, mod, "remove_method", mrb_mod_remove_method, MRB_ARGS_ANY()); /* 15.2.2.4.41 */
  mrb_define_method(mrb, mod, "method_removed", mrb_bob_init, MRB_ARGS_REQ(1));
  mrb_define_method(mrb, mod, "attr_reader", mrb_mod_attr_reader, MRB_ARGS_ANY()); /* 15.2.2.4.13 */
  mrb_define_method(mrb, mod, "attr_writer", mrb_mod_attr_writer, MRB_ARGS_ANY()); /* 15.2.2.4.14 */
  mrb_define_method(mrb, mod, "to_s", mrb_mod_to_s, MRB_ARGS_NONE());
  mrb_define_method(mrb, mod, "inspect", mrb_mod_to_s, MRB_ARGS_NONE());
  mrb_define_method(mrb, mod, "alias_method", mrb_mod_alias, MRB_ARGS_ANY()); /* 15.2.2.4.8 */
  mrb_define_method(mrb, mod, "ancestors", mrb_mod_ancestors, MRB_ARGS_NONE()); /* 15.2.2.4.9 */
  mrb_define_method(mrb, mod, "undef_method", mrb_mod_undef, MRB_ARGS_ANY()); /* 15.2.2.4.41 */
  mrb_define_method(mrb, mod, "const_defined?", mrb_mod_const_defined, MRB_ARGS_ARG(1,1)); /* 15.2.2.4.20 */
  mrb_define_method(mrb, mod, "const_get", mrb_mod_const_get, MRB_ARGS_REQ(1)); /* 15.2.2.4.21 */
  mrb_define_method(mrb, mod, "const_set", mrb_mod_const_set, MRB_ARGS_REQ(2)); /* 15.2.2.4.23 */
  mrb_define_method(mrb, mod, "constants", mrb_mod_constants, MRB_ARGS_OPT(1)); /* 15.2.2.4.24 */
  mrb_define_method(mrb, mod, "remove_const", mrb_mod_remove_const, MRB_ARGS_REQ(1)); /* 15.2.2.4.40 */
  mrb_define_method(mrb, mod, "const_missing", mrb_mod_const_missing, MRB_ARGS_REQ(1));
  mrb_define_method(mrb, mod, "define_method", mod_define_method, MRB_ARGS_ARG(1,1));
  mrb_define_method(mrb, mod, "class_variables", mrb_mod_class_variables, MRB_ARGS_NONE()); /* 15.2.2.4.19 */
  mrb_define_method(mrb, mod, "===", mrb_mod_eqq, MRB_ARGS_REQ(1));
  mrb_define_class_method(mrb, mod, "constants", mrb_mod_s_constants, MRB_ARGS_ANY()); /* 15.2.2.3.1 */
  mrb_define_class_method(mrb, mod, "nesting", mrb_mod_s_nesting, MRB_ARGS_REQ(0)); /* 15.2.2.3.2 */

  /* Class does not allow module-style mixin hooks. */
  mrb_undef_method(mrb, cls, "append_features");
  mrb_undef_method(mrb, cls, "extend_object");

  /* top-level "main" object */
  mrb->top_self = (struct RObject*)mrb_obj_alloc(mrb, MRB_TT_OBJECT, mrb->object_class);
  mrb_define_singleton_method(mrb, mrb->top_self, "inspect", inspect_main, MRB_ARGS_NONE());
  mrb_define_singleton_method(mrb, mrb->top_self, "to_s", inspect_main, MRB_ARGS_NONE());
  mrb_define_singleton_method(mrb, mrb->top_self, "define_method", top_define_method, MRB_ARGS_ARG(1,1));
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_178_0 |
crossvul-cpp_data_bad_576_2 | /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* plugins/kdb/ldap/libkdb_ldap/ldap_principal2.c */
/*
* Copyright (C) 2016 by the Massachusetts Institute of Technology.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright (c) 2004-2005, Novell, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * The copyright holder's name is not used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#include "ldap_main.h"
#include "kdb_ldap.h"
#include "ldap_principal.h"
#include "princ_xdr.h"
#include "ldap_tkt_policy.h"
#include "ldap_pwd_policy.h"
#include "ldap_err.h"
#include <kadm5/admin.h>
#include <time.h>
extern char* principal_attributes[];
extern char* max_pwd_life_attr[];
static char *
getstringtime(krb5_timestamp);
/*
 * Convert an LDAP berval (a 16-bit tl_data type tag followed by payload
 * bytes) into a newly allocated krb5_tl_data.  Returns 0 on success,
 * EINVAL if the value is too short to hold the tag, or ENOMEM.
 *
 * Fixes: (1) the original computed in->bv_len - 2 without checking
 * bv_len >= 2, so a short value underflowed the unsigned length and led
 * to a huge allocation/copy; (2) *out was stored before the second
 * allocation was verified, leaving the caller with a dangling pointer on
 * ENOMEM.  *out is now written only on success.
 */
krb5_error_code
berval2tl_data(struct berval *in, krb5_tl_data **out)
{
    krb5_tl_data *tl;

    /* The value must contain at least the two tag bytes. */
    if (in->bv_len < 2)
        return EINVAL;

    tl = malloc(sizeof(*tl));
    if (tl == NULL)
        return ENOMEM;

    tl->tl_data_next = NULL;
    /* NOTE(review): tl_data_length is narrower than bv_len; oversized
     * values would truncate -- confirm upstream constrains the size. */
    tl->tl_data_length = in->bv_len - 2;
    /* Allocate at least one byte so a zero-length payload cannot turn a
     * legal malloc(0) == NULL result into a spurious ENOMEM. */
    tl->tl_data_contents = malloc(tl->tl_data_length ? tl->tl_data_length : 1);
    if (tl->tl_data_contents == NULL) {
        free(tl);
        return ENOMEM;
    }

    UNSTORE16_INT(in->bv_val, tl->tl_data_type);
    memcpy(tl->tl_data_contents, in->bv_val + 2, tl->tl_data_length);

    *out = tl;
    return 0;
}
/*
* look up a principal in the directory.
*/
/*
 * Look up searchfor in each configured LDAP subtree and, on a match,
 * populate a krb5_db_entry for it.  With KRB5_KDB_FLAG_ALIAS_OK, a match
 * on an alias resolves to the entry's krbcanonicalname; otherwise aliases
 * are rejected.  On success *entry_ptr owns the new entry; the caller
 * frees it with krb5_db_free_principal.
 *
 * NOTE(review): GET_HANDLE, LDAP_SEARCH, and CHECK_LDAP_HANDLE are macros
 * that can set st/tempst and jump to cleanup.
 * NOTE(review): if an LDAP result contains several entries that reach the
 * allocation below (e.g. entries without a krbprincipalname attribute),
 * `entry` is overwritten and the previous allocation leaks -- confirm
 * whether the search filter makes that impossible.
 */
krb5_error_code
krb5_ldap_get_principal(krb5_context context, krb5_const_principal searchfor,
                        unsigned int flags, krb5_db_entry **entry_ptr)
{
    char *user=NULL, *filter=NULL, *filtuser=NULL;
    unsigned int tree=0, ntrees=1, princlen=0;
    krb5_error_code tempst=0, st=0;
    char **values=NULL, **subtree=NULL, *cname=NULL;
    LDAP *ld=NULL;
    LDAPMessage *result=NULL, *ent=NULL;
    krb5_ldap_context *ldap_context=NULL;
    kdb5_dal_handle *dal_handle=NULL;
    krb5_ldap_server_handle *ldap_server_handle=NULL;
    krb5_principal cprinc=NULL;
    krb5_boolean found=FALSE;
    krb5_db_entry *entry = NULL;

    *entry_ptr = NULL;

    /* Clear the global error string */
    krb5_clear_error_message(context);

    if (searchfor == NULL)
        return EINVAL;

    dal_handle = context->dal_handle;
    ldap_context = (krb5_ldap_context *) dal_handle->db_context;
    CHECK_LDAP_HANDLE(ldap_context);

    if (!is_principal_in_realm(ldap_context, searchfor)) {
        st = KRB5_KDB_NOENTRY;
        k5_setmsg(context, st, _("Principal does not belong to realm"));
        goto cleanup;
    }

    if ((st=krb5_unparse_name(context, searchfor, &user)) != 0)
        goto cleanup;

    if ((st=krb5_ldap_unparse_principal_name(user)) != 0)
        goto cleanup;

    /* Escape LDAP filter metacharacters in the principal name. */
    filtuser = ldap_filter_correct(user);
    if (filtuser == NULL) {
        st = ENOMEM;
        goto cleanup;
    }

    princlen = strlen(FILTER) + strlen(filtuser) + 2 + 1; /* 2 for closing brackets */
    if ((filter = malloc(princlen)) == NULL) {
        st = ENOMEM;
        goto cleanup;
    }
    snprintf(filter, princlen, FILTER"%s))", filtuser);

    if ((st = krb5_get_subtree_info(ldap_context, &subtree, &ntrees)) != 0)
        goto cleanup;

    GET_HANDLE();
    for (tree=0; tree < ntrees && !found; ++tree) {

        LDAP_SEARCH(subtree[tree], ldap_context->lrparams->search_scope, filter, principal_attributes);
        for (ent=ldap_first_entry(ld, result); ent != NULL && !found; ent=ldap_next_entry(ld, ent)) {

            /* get the associated directory user information */
            if ((values=ldap_get_values(ld, ent, "krbprincipalname")) != NULL) {
                int i;

                /* a wild-card in a principal name can return a list of kerberos principals.
                 * Make sure that the correct principal is returned.
                 * NOTE: a principalname k* in ldap server will return all the principals starting with a k
                 */
                for (i=0; values[i] != NULL; ++i) {
                    if (strcmp(values[i], user) == 0) {
                        found = TRUE;
                        break;
                    }
                }
                ldap_value_free(values);

                if (!found) /* no matching principal found */
                    continue;
            }

            if ((values=ldap_get_values(ld, ent, "krbcanonicalname")) != NULL) {
                if (values[0] && strcmp(values[0], user) != 0) {
                    /* We matched an alias, not the canonical name. */
                    if (flags & KRB5_KDB_FLAG_ALIAS_OK) {
                        st = krb5_ldap_parse_principal_name(values[0], &cname);
                        if (st != 0)
                            goto cleanup;
                        st = krb5_parse_name(context, cname, &cprinc);
                        if (st != 0)
                            goto cleanup;
                    } else /* No canonicalization, so don't return aliases. */
                        found = FALSE;
                }
                ldap_value_free(values);
                if (!found)
                    continue;
            }

            /* Build the DB entry from the LDAP entry, using the canonical
             * principal when an alias was resolved. */
            entry = k5alloc(sizeof(*entry), &st);
            if (entry == NULL)
                goto cleanup;
            if ((st = populate_krb5_db_entry(context, ldap_context, ld, ent,
                                             cprinc ? cprinc : searchfor,
                                             entry)) != 0)
                goto cleanup;
        }
        ldap_msgfree(result);
        result = NULL;
    } /* for (tree=0 ... */

    if (found) {
        /* Transfer ownership of the entry to the caller. */
        *entry_ptr = entry;
        entry = NULL;
    } else
        st = KRB5_KDB_NOENTRY;

cleanup:
    ldap_msgfree(result);
    krb5_db_free_principal(context, entry);
    if (filter)
        free (filter);
    if (subtree) {
        for (; ntrees; --ntrees)
            if (subtree[ntrees-1])
                free (subtree[ntrees-1]);
        free (subtree);
    }
    if (ldap_server_handle)
        krb5_ldap_put_handle_to_pool(ldap_context, ldap_server_handle);
    if (user)
        free(user);
    if (filtuser)
        free(filtuser);
    if (cname)
        free(cname);
    if (cprinc)
        krb5_free_principal(context, cprinc);
    return st;
}
/* Operation selector for process_db_args: which checks apply. */
typedef enum{ ADD_PRINCIPAL, MODIFY_PRINCIPAL } OPERATION;
/*
 * ptype was creating confusion.  Additionally, the logic surrounding
 * ptype was redundant and can be achieved with the help of the dn and
 * containerdn members, so the ptype member was dropped.
 */

/* Extra DN-related options parsed out of the db_args list. */
typedef struct _xargs_t {
    char *dn;                 /* explicit DN for the principal entry (dn=...) */
    char *linkdn;             /* DN of an existing entry to link to (linkdn=...) */
    krb5_boolean dn_from_kbd; /* TRUE when any DN option was supplied via db_args */
    char *containerdn;        /* container to create the entry under (containerdn=...) */
    char *tktpolicydn;        /* ticket policy DN (tktpolicy=...) */
}xargs_t;
/*
 * Release all DN strings held in an xargs_t.  The structure itself is
 * passed by value (caller-owned), so only the members are freed.
 */
static void
free_xargs(xargs_t xargs)
{
    /* free(NULL) is a no-op per C89 and later, so no NULL guards. */
    free(xargs.dn);
    free(xargs.linkdn);
    free(xargs.containerdn);
    free(xargs.tktpolicydn);
}
/*
 * Parse module-specific db_args of the form "name=value" into *xargs.
 * Recognized names are TKTPOLICY_ARG, USERDN_ARG, CONTAINERDN_ARG and
 * LINKDN_ARG.  The DN options are rejected for MODIFY_PRINCIPAL and are
 * mutually exclusive as checked below; any DN option also sets
 * dn_from_kbd so the caller validates the DN against the realm
 * subtrees.  Returns 0 on success or EINVAL with an extended error
 * message set on context.
 */
static krb5_error_code
process_db_args(krb5_context context, char **db_args, xargs_t *xargs,
                OPERATION optype)
{
    int i=0;
    krb5_error_code st=0;
    char *arg=NULL, *arg_val=NULL;
    char **dptr=NULL;
    unsigned int arg_val_len=0;

    if (db_args) {
        for (i=0; db_args[i]; ++i) {
            /* Split "name=value" in place; strtok_r leaves the value
             * (text after the first '=') in arg_val. */
            arg = strtok_r(db_args[i], "=", &arg_val);
            arg = (arg != NULL) ? arg : "";
            if (strcmp(arg, TKTPOLICY_ARG) == 0) {
                dptr = &xargs->tktpolicydn;
            } else {
                if (strcmp(arg, USERDN_ARG) == 0) {
                    /* dn= only valid on add, and only once, and not
                     * combined with containerdn= or linkdn=. */
                    if (optype == MODIFY_PRINCIPAL ||
                        xargs->dn != NULL || xargs->containerdn != NULL ||
                        xargs->linkdn != NULL) {
                        st = EINVAL;
                        k5_setmsg(context, st, _("%s option not supported"),
                                  arg);
                        goto cleanup;
                    }
                    dptr = &xargs->dn;
                } else if (strcmp(arg, CONTAINERDN_ARG) == 0) {
                    if (optype == MODIFY_PRINCIPAL ||
                        xargs->dn != NULL || xargs->containerdn != NULL) {
                        st = EINVAL;
                        k5_setmsg(context, st, _("%s option not supported"),
                                  arg);
                        goto cleanup;
                    }
                    dptr = &xargs->containerdn;
                } else if (strcmp(arg, LINKDN_ARG) == 0) {
                    if (xargs->dn != NULL || xargs->linkdn != NULL) {
                        st = EINVAL;
                        k5_setmsg(context, st, _("%s option not supported"),
                                  arg);
                        goto cleanup;
                    }
                    dptr = &xargs->linkdn;
                } else {
                    st = EINVAL;
                    k5_setmsg(context, st, _("unknown option: %s"), arg);
                    goto cleanup;
                }
                /* Remember that a DN came from user input. */
                xargs->dn_from_kbd = TRUE;
                if (arg_val == NULL || strlen(arg_val) == 0) {
                    st = EINVAL;
                    k5_setmsg(context, st, _("%s option value missing"), arg);
                    goto cleanup;
                }
            }
            /* TKTPOLICY_ARG skips the block above, so re-check here. */
            if (arg_val == NULL) {
                st = EINVAL;
                k5_setmsg(context, st, _("%s option value missing"), arg);
                goto cleanup;
            }
            arg_val_len = strlen(arg_val) + 1;
            if (strcmp(arg, TKTPOLICY_ARG) == 0) {
                /* Policy names are stored as full policy DNs. */
                if ((st = krb5_ldap_name_to_policydn (context,
                                                      arg_val,
                                                      dptr)) != 0)
                    goto cleanup;
            } else {
                *dptr = k5memdup(arg_val, arg_val_len, &st);
                if (*dptr == NULL)
                    goto cleanup;
            }
        }
    }
cleanup:
    return st;
}
/* Accessor table giving this module access to libkrb5-internal ASN.1
 * KrbKeySet encode/decode entry points. */
krb5int_access accessor;

/*
 * ASN.1-encode n_key_data keys (all of one kvno, taken from the first
 * element) together with mkvno into a KrbKeySet, returning a newly
 * allocated krb5_data in *code.
 */
static krb5_error_code
asn1_encode_sequence_of_keys(krb5_key_data *key_data, krb5_int16 n_key_data,
                             krb5_int32 mkvno, krb5_data **code)
{
    krb5_error_code err;
    ldap_seqof_key_data val;

    /*
     * This should be pushed back into other library initialization
     * code.
     */
    err = kldap_ensure_initialized ();
    if (err)
        return err;

    val.key_data = key_data;
    val.n_key_data = n_key_data;
    val.mkvno = mkvno;
    /* All keys in one set share a kvno; use the first element's. */
    val.kvno = key_data[0].key_data_kvno;

    return accessor.asn1_ldap_encode_sequence_of_keys(&val, code);
}
/*
 * Decode an ASN.1 KrbKeySet from *in into *out.  The decoder allocates
 * a container which is flattened into *out by structure copy, so the
 * caller owns the key_data array inside *out and must free it (e.g. via
 * free_ldap_seqof_key_data / k5_free_key_data).
 */
static krb5_error_code
asn1_decode_sequence_of_keys(krb5_data *in, ldap_seqof_key_data *out)
{
    krb5_error_code err;
    ldap_seqof_key_data *p;
    int i;

    memset(out, 0, sizeof(*out));

    /*
     * This should be pushed back into other library initialization
     * code.
     */
    err = kldap_ensure_initialized ();
    if (err)
        return err;

    err = accessor.asn1_ldap_decode_sequence_of_keys(in, &p);
    if (err)
        return err;

    /* Set kvno and key_data_ver in each key_data element. */
    for (i = 0; i < p->n_key_data; i++) {
        p->key_data[i].key_data_kvno = p->kvno;
        /* The decoder sets key_data_ver to 1 if no salt is present, but leaves
         * it at 0 if salt is present. */
        if (p->key_data[i].key_data_ver == 0)
            p->key_data[i].key_data_ver = 2;
    }

    /* Transfer ownership of the contents; only the shell is freed. */
    *out = *p;
    free(p);
    return 0;
}
/*
 * Free a NULL-terminated struct berval *array[] and all its contents.
 * Does not set array to NULL after freeing it.
 */
void
free_berdata(struct berval **array)
{
    int i;

    if (array == NULL)
        return;
    for (i = 0; array[i] != NULL; i++) {
        /* free(NULL) is a no-op, so bv_val needs no guard. */
        free(array[i]->bv_val);
        free(array[i]);
    }
    free(array);
}
/*
 * Encode krb5_key_data into a berval struct for insertion into LDAP.
 * On success *bval_out is a newly allocated berval owning its bv_val
 * buffer; the caller must free both.  key_data_in is not modified.
 */
static krb5_error_code
encode_keys(krb5_key_data *key_data_in, int n_key_data, krb5_kvno mkvno,
            struct berval **bval_out)
{
    krb5_error_code err = 0;
    int i;
    krb5_key_data *key_data = NULL;
    struct berval *bval = NULL;
    krb5_data *code;

    *bval_out = NULL;
    if (n_key_data <= 0) {
        err = EINVAL;
        goto cleanup;
    }

    /* Make a shallow copy of the key data so we can alter it. */
    key_data = k5calloc(n_key_data, sizeof(*key_data), &err);
    if (key_data == NULL)
        goto cleanup;
    memcpy(key_data, key_data_in, n_key_data * sizeof(*key_data));

    /* Unpatched krb5 1.11 and 1.12 cannot decode KrbKey sequences with no salt
     * field.  For compatibility, always encode a salt field. */
    for (i = 0; i < n_key_data; i++) {
        if (key_data[i].key_data_ver == 1) {
            key_data[i].key_data_ver = 2;
            key_data[i].key_data_type[1] = KRB5_KDB_SALTTYPE_NORMAL;
            key_data[i].key_data_length[1] = 0;
            key_data[i].key_data_contents[1] = NULL;
        }
    }

    bval = k5alloc(sizeof(struct berval), &err);
    if (bval == NULL)
        goto cleanup;

    err = asn1_encode_sequence_of_keys(key_data, n_key_data, mkvno, &code);
    if (err)
        goto cleanup;

    /* Steal the data pointer from code for bval and discard code. */
    bval->bv_len = code->length;
    bval->bv_val = code->data;
    free(code);
    *bval_out = bval;
    bval = NULL;

cleanup:
    /* key_data is a shallow copy; the contents belong to key_data_in. */
    free(key_data);
    free(bval);
    return err;
}
/*
 * ASN.1-encode a principal's keys for the krbprincipalkey attribute.
 * key_data is assumed to be grouped by kvno; one berval is produced per
 * key version.  Returns a NULL-terminated berval array (possibly with
 * zero elements if n_key_data is 0), or NULL on failure.
 */
struct berval **
krb5_encode_krbsecretkey(krb5_key_data *key_data, int n_key_data,
                         krb5_kvno mkvno)
{
    struct berval **ret = NULL;
    int currkvno;
    int num_versions = 0;
    int i, j, last;
    krb5_error_code err = 0;

    if (n_key_data < 0)
        return NULL;

    /* Find the number of key versions */
    if (n_key_data > 0) {
        for (i = 0, num_versions = 1; i < n_key_data - 1; i++) {
            if (key_data[i].key_data_kvno != key_data[i + 1].key_data_kvno)
                num_versions++;
        }
    }

    ret = calloc(num_versions + 1, sizeof(struct berval *));
    if (ret == NULL) {
        err = ENOMEM;
        goto cleanup;
    }
    ret[num_versions] = NULL;

    /* n_key_data may be 0 if a principal is created without a key. */
    if (n_key_data == 0)
        goto cleanup;

    /* Encode each run of keys sharing a kvno into one berval. */
    currkvno = key_data[0].key_data_kvno;
    for (i = 0, last = 0, j = 0; i < n_key_data; i++) {
        if (i == n_key_data - 1 || key_data[i + 1].key_data_kvno != currkvno) {
            err = encode_keys(key_data + last, (krb5_int16)i - last + 1, mkvno,
                              &ret[j]);
            if (err)
                goto cleanup;
            j++;
            last = i + 1;
            if (i < n_key_data - 1)
                currkvno = key_data[i + 1].key_data_kvno;
        }
    }

cleanup:
    if (err != 0) {
        free_berdata(ret);
        ret = NULL;
    }
    return ret;
}
/*
 * Encode a principal's key history for insertion into ldap.
 * One berval is produced per old keyset; all are tagged with the admin
 * history kvno.  Returns NULL if there is no history or on error.
 */
static struct berval **
krb5_encode_histkey(osa_princ_ent_rec *princ_ent)
{
    unsigned int i;
    krb5_error_code err = 0;
    struct berval **ret = NULL;

    if (princ_ent->old_key_len <= 0)
        return NULL;

    ret = k5calloc(princ_ent->old_key_len + 1, sizeof(struct berval *), &err);
    if (ret == NULL)
        goto cleanup;
    for (i = 0; i < princ_ent->old_key_len; i++) {
        /* An empty keyset in the history is malformed. */
        if (princ_ent->old_keys[i].n_key_data <= 0) {
            err = EINVAL;
            goto cleanup;
        }
        err = encode_keys(princ_ent->old_keys[i].key_data,
                          princ_ent->old_keys[i].n_key_data,
                          princ_ent->admin_history_kvno, &ret[i]);
        if (err)
            goto cleanup;
    }
    ret[princ_ent->old_key_len] = NULL;

cleanup:
    if (err != 0) {
        free_berdata(ret);
        ret = NULL;
    }
    return ret;
}
/*
 * Convert one krb5_tl_data element into a berval for the krbExtraData
 * attribute.  The encoding is a 16-bit type tag (STORE16_INT) followed
 * by the raw tl_data contents.  On success the caller owns *out and
 * must free both *out and (*out)->bv_val; on failure *out is NULL.
 */
static krb5_error_code
tl_data2berval (krb5_tl_data *in, struct berval **out)
{
    /* Idiomatic malloc: no cast, size from the dereferenced pointer. */
    *out = malloc(sizeof(**out));
    if (*out == NULL)
        return ENOMEM;

    /* Two extra bytes hold the tl_data_type tag. */
    (*out)->bv_len = in->tl_data_length + 2;
    (*out)->bv_val = malloc((*out)->bv_len);
    if ((*out)->bv_val == NULL) {
        free(*out);
        *out = NULL;  /* don't leave a dangling pointer behind */
        return ENOMEM;
    }

    STORE16_INT((*out)->bv_val, in->tl_data_type);
    memcpy((*out)->bv_val + 2, in->tl_data_contents, in->tl_data_length);
    return 0;
}
/* Parse the "require_auth" string for auth indicators, adding them to the
 * krbPrincipalAuthInd attribute.  At most 9 indicators are stored (strval
 * has 10 slots, the last reserved for the NULL terminator of the
 * zero-initialized array).  Returns 0 if the string attribute is absent. */
static krb5_error_code
update_ldap_mod_auth_ind(krb5_context context, krb5_db_entry *entry,
                         LDAPMod ***mods)
{
    int i = 0;
    krb5_error_code ret;
    char *auth_ind = NULL;
    char *strval[10] = {};
    char *ai, *ai_save = NULL;
    int sv_num = sizeof(strval) / sizeof(*strval);

    ret = krb5_dbe_get_string(context, entry, KRB5_KDB_SK_REQUIRE_AUTH,
                              &auth_ind);
    if (ret || auth_ind == NULL)
        goto cleanup;

    /* Split the space-separated indicator list in place; strval entries
     * point into auth_ind, which stays live until cleanup. */
    ai = strtok_r(auth_ind, " ", &ai_save);
    while (ai != NULL && i < sv_num) {
        strval[i++] = ai;
        ai = strtok_r(NULL, " ", &ai_save);
    }

    ret = krb5_add_str_mem_ldap_mod(mods, "krbPrincipalAuthInd",
                                    LDAP_MOD_REPLACE, strval);

cleanup:
    krb5_dbe_free_string(context, auth_ind);
    return ret;
}
/*
 * Add a new principal entry to, or modify an existing one in, the LDAP
 * directory.  entry->mask selects the operation (KADM5_PRINCIPAL set
 * means add; KADM5_LOAD means a dump load that may replace an existing
 * entry) and which attributes are written.  db_args may carry the
 * LDAP-specific dn/containerdn/linkdn/tktpolicy options parsed by
 * process_db_args().  A principal may be stored as a standalone
 * krbprincipal object or mixed into an existing directory object.
 */
krb5_error_code
krb5_ldap_put_principal(krb5_context context, krb5_db_entry *entry,
                        char **db_args)
{
    int l=0, kerberos_principal_object_type=0;
    unsigned int ntrees=0, tre=0;
    krb5_error_code st=0, tempst=0;
    LDAP *ld=NULL;
    LDAPMessage *result=NULL, *ent=NULL;
    char **subtreelist = NULL;
    char *user=NULL, *subtree=NULL, *principal_dn=NULL;
    char **values=NULL, *strval[10]={NULL}, errbuf[1024];
    char *filtuser=NULL;
    struct berval **bersecretkey=NULL;
    LDAPMod **mods=NULL;
    krb5_boolean create_standalone=FALSE;
    krb5_boolean krb_identity_exists=FALSE, establish_links=FALSE;
    char *standalone_principal_dn=NULL;
    krb5_tl_data *tl_data=NULL;
    krb5_key_data **keys=NULL;
    kdb5_dal_handle *dal_handle=NULL;
    krb5_ldap_context *ldap_context=NULL;
    krb5_ldap_server_handle *ldap_server_handle=NULL;
    osa_princ_ent_rec princ_ent = {0};
    xargs_t xargs = {0};
    char *polname = NULL;
    OPERATION optype;
    krb5_boolean found_entry = FALSE;

    /* Clear the global error string */
    krb5_clear_error_message(context);

    SETUP_CONTEXT();
    if (ldap_context->lrparams == NULL || ldap_context->container_dn == NULL)
        return EINVAL;

    /* get ldap handle */
    GET_HANDLE();

    if (!is_principal_in_realm(ldap_context, entry->princ)) {
        st = EINVAL;
        k5_setmsg(context, st,
                  _("Principal does not belong to the default realm"));
        goto cleanup;
    }

    /* get the principal information to act on */
    if (((st=krb5_unparse_name(context, entry->princ, &user)) != 0) ||
        ((st=krb5_ldap_unparse_principal_name(user)) != 0))
        goto cleanup;
    /* LDAP-filter-escape the name for use in search filters. */
    filtuser = ldap_filter_correct(user);
    if (filtuser == NULL) {
        st = ENOMEM;
        goto cleanup;
    }

    /* Identify the type of operation; it can be
     * add principal or modify principal.
     * hack: if the entry->mask has the KRB_PRINCIPAL flag set
     * then it is an add operation
     */
    if (entry->mask & KADM5_PRINCIPAL)
        optype = ADD_PRINCIPAL;
    else
        optype = MODIFY_PRINCIPAL;

    if (((st=krb5_get_princ_type(context, entry, &kerberos_principal_object_type)) != 0) ||
        ((st=krb5_get_userdn(context, entry, &principal_dn)) != 0))
        goto cleanup;

    if ((st=process_db_args(context, db_args, &xargs, optype)) != 0)
        goto cleanup;

    if (entry->mask & KADM5_LOAD) {
        unsigned int tree = 0;
        int numlentries = 0;
        char *filter = NULL;

        /* A load operation is special, will do a mix-in (add krbprinc
         * attrs to a non-krb object entry) if an object exists with a
         * matching krbprincipalname attribute so try to find existing
         * object and set principal_dn.  This assumes that the
         * krbprincipalname attribute is unique (only one object entry has
         * a particular krbprincipalname attribute).
         */
        if (asprintf(&filter, FILTER"%s))", filtuser) < 0) {
            filter = NULL;
            st = ENOMEM;
            goto cleanup;
        }

        /* get the current subtree list */
        if ((st = krb5_get_subtree_info(ldap_context, &subtreelist, &ntrees)) != 0)
            goto cleanup;

        found_entry = FALSE;
        /* search for entry with matching krbprincipalname attribute */
        for (tree = 0; found_entry == FALSE && tree < ntrees; ++tree) {
            if (principal_dn == NULL) {
                LDAP_SEARCH_1(subtreelist[tree], ldap_context->lrparams->search_scope, filter, principal_attributes, IGNORE_STATUS);
            } else {
                /* just look for entry with principal_dn */
                LDAP_SEARCH_1(principal_dn, LDAP_SCOPE_BASE, filter, principal_attributes, IGNORE_STATUS);
            }
            if (st == LDAP_SUCCESS) {
                numlentries = ldap_count_entries(ld, result);
                if (numlentries > 1) {
                    free(filter);
                    st = EINVAL;
                    k5_setmsg(context, st,
                              _("operation can not continue, more than one "
                                "entry with principal name \"%s\" found"),
                              user);
                    goto cleanup;
                } else if (numlentries == 1) {
                    found_entry = TRUE;
                    if (principal_dn == NULL) {
                        ent = ldap_first_entry(ld, result);
                        if (ent != NULL) {
                            /* setting principal_dn will cause that entry to be modified further down */
                            if ((principal_dn = ldap_get_dn(ld, ent)) == NULL) {
                                ldap_get_option (ld, LDAP_OPT_RESULT_CODE, &st);
                                st = set_ldap_error (context, st, 0);
                                free(filter);
                                goto cleanup;
                            }
                        }
                    }
                }
            } else if (st != LDAP_NO_SUCH_OBJECT) {
                /* could not perform search, return with failure */
                st = set_ldap_error (context, st, 0);
                free(filter);
                goto cleanup;
            }
            ldap_msgfree(result);
            result = NULL;
            /*
             * If it isn't found then assume a standalone princ entry is to
             * be created.
             */
        } /* end for (tree = 0; principal_dn == ... */
        free(filter);

        if (found_entry == FALSE && principal_dn != NULL) {
            /*
             * if principal_dn is null then there is code further down to
             * deal with setting standalone_principal_dn.  Also note that
             * this will set create_standalone true for
             * non-mix-in entries which is okay if loading from a dump.
             */
            create_standalone = TRUE;
            standalone_principal_dn = strdup(principal_dn);
            CHECK_NULL(standalone_principal_dn);
        }
    } /* end if (entry->mask & KADM5_LOAD */

    /* time to generate the DN information with the help of
     * containerdn, principalcontainerreference or
     * realmcontainerdn information
     */
    if (principal_dn == NULL && xargs.dn == NULL) { /* creation of standalone principal */
        /* get the subtree information */
        if (entry->princ->length == 2 && entry->princ->data[0].length == strlen("krbtgt") &&
            strncmp(entry->princ->data[0].data, "krbtgt", entry->princ->data[0].length) == 0) {
            /* if the principal is a inter-realm principal, always created in the realm container */
            subtree = strdup(ldap_context->lrparams->realmdn);
        } else if (xargs.containerdn) {
            if ((st=checkattributevalue(ld, xargs.containerdn, NULL, NULL, NULL)) != 0) {
                if (st == KRB5_KDB_NOENTRY || st == KRB5_KDB_CONSTRAINT_VIOLATION) {
                    int ost = st;
                    st = EINVAL;
                    k5_wrapmsg(context, ost, st, _("'%s' not found"),
                               xargs.containerdn);
                }
                goto cleanup;
            }
            subtree = strdup(xargs.containerdn);
        } else if (ldap_context->lrparams->containerref && strlen(ldap_context->lrparams->containerref) != 0) {
            /*
             * Here the subtree should be changed with
             * principalcontainerreference attribute value
             */
            subtree = strdup(ldap_context->lrparams->containerref);
        } else {
            subtree = strdup(ldap_context->lrparams->realmdn);
        }
        CHECK_NULL(subtree);

        if (asprintf(&standalone_principal_dn, "krbprincipalname=%s,%s",
                     filtuser, subtree) < 0)
            standalone_principal_dn = NULL;
        CHECK_NULL(standalone_principal_dn);
        /*
         * free subtree when you are done using the subtree
         * set the boolean create_standalone to TRUE
         */
        create_standalone = TRUE;
        free(subtree);
        subtree = NULL;
    }

    /*
     * If the DN information is presented by the user, time to
     * validate the input to ensure that the DN falls under
     * any of the subtrees
     */
    if (xargs.dn_from_kbd == TRUE) {
        /* make sure the DN falls in the subtree */
        int dnlen=0, subtreelen=0;
        char *dn=NULL;
        krb5_boolean outofsubtree=TRUE;

        if (xargs.dn != NULL) {
            dn = xargs.dn;
        } else if (xargs.linkdn != NULL) {
            dn = xargs.linkdn;
        } else if (standalone_principal_dn != NULL) {
            /*
             * Even though the standalone_principal_dn is constructed
             * within this function, there is the containerdn input
             * from the user that can become part of the it.
             */
            dn = standalone_principal_dn;
        }

        /* Get the current subtree list if we haven't already done so. */
        if (subtreelist == NULL) {
            st = krb5_get_subtree_info(ldap_context, &subtreelist, &ntrees);
            if (st)
                goto cleanup;
        }

        /* A DN is in-subtree if it ends with one of the subtree DNs
         * (case-insensitive suffix match). */
        for (tre=0; tre<ntrees; ++tre) {
            if (subtreelist[tre] == NULL || strlen(subtreelist[tre]) == 0) {
                outofsubtree = FALSE;
                break;
            } else {
                dnlen = strlen (dn);
                subtreelen = strlen(subtreelist[tre]);
                if ((dnlen >= subtreelen) && (strcasecmp((dn + dnlen - subtreelen), subtreelist[tre]) == 0)) {
                    outofsubtree = FALSE;
                    break;
                }
            }
        }

        if (outofsubtree == TRUE) {
            st = EINVAL;
            k5_setmsg(context, st, _("DN is out of the realm subtree"));
            goto cleanup;
        }

        /*
         * dn value will be set either by dn, linkdn or the standalone_principal_dn
         * In the first 2 cases, the dn should be existing and in the last case we
         * are supposed to create the ldap object.  so the below should not be
         * executed for the last case.
         */
        if (standalone_principal_dn == NULL) {
            /*
             * If the ldap object is missing, this results in an error.
             */

            /*
             * Search for krbprincipalname attribute here.
             * This is to find if a kerberos identity is already present
             * on the ldap object, in which case adding a kerberos identity
             * on the ldap object should result in an error.
             */
            char *attributes[]={"krbticketpolicyreference", "krbprincipalname", NULL};

            ldap_msgfree(result);
            result = NULL;
            LDAP_SEARCH_1(dn, LDAP_SCOPE_BASE, 0, attributes, IGNORE_STATUS);
            if (st == LDAP_SUCCESS) {
                ent = ldap_first_entry(ld, result);
                if (ent != NULL) {
                    if ((values=ldap_get_values(ld, ent, "krbticketpolicyreference")) != NULL) {
                        ldap_value_free(values);
                    }
                    if ((values=ldap_get_values(ld, ent, "krbprincipalname")) != NULL) {
                        krb_identity_exists = TRUE;
                        ldap_value_free(values);
                    }
                }
            } else {
                st = set_ldap_error(context, st, OP_SEARCH);
                goto cleanup;
            }
        }
    }

    /*
     * If xargs.dn is set then the request is to add a
     * kerberos principal on a ldap object, but if
     * there is one already on the ldap object this
     * should result in an error.
     */
    if (xargs.dn != NULL && krb_identity_exists == TRUE) {
        st = EINVAL;
        snprintf(errbuf, sizeof(errbuf),
                 _("ldap object is already kerberized"));
        k5_setmsg(context, st, "%s", errbuf);
        goto cleanup;
    }

    if (xargs.linkdn != NULL) {
        /*
         * link information can be changed using modprinc.
         * However, link information can be changed only on the
         * standalone kerberos principal objects.  A standalone
         * kerberos principal object is of type krbprincipal
         * structural objectclass.
         *
         * NOTE: kerberos principals on an ldap object can't be
         * linked to other ldap objects.
         */
        if (optype == MODIFY_PRINCIPAL &&
            kerberos_principal_object_type != KDB_STANDALONE_PRINCIPAL_OBJECT) {
            st = EINVAL;
            snprintf(errbuf, sizeof(errbuf),
                     _("link information can not be set/updated as the "
                       "kerberos principal belongs to an ldap object"));
            k5_setmsg(context, st, "%s", errbuf);
            goto cleanup;
        }
        /*
         * Check the link information.  If there is already a link
         * existing then this operation is not allowed.
         */
        {
            char **linkdns=NULL;
            int j=0;

            if ((st=krb5_get_linkdn(context, entry, &linkdns)) != 0) {
                snprintf(errbuf, sizeof(errbuf),
                         _("Failed getting object references"));
                k5_setmsg(context, st, "%s", errbuf);
                goto cleanup;
            }
            if (linkdns != NULL) {
                st = EINVAL;
                snprintf(errbuf, sizeof(errbuf),
                         _("kerberos principal is already linked to a ldap "
                           "object"));
                k5_setmsg(context, st, "%s", errbuf);
                for (j=0; linkdns[j] != NULL; ++j)
                    free (linkdns[j]);
                free (linkdns);
                goto cleanup;
            }
        }
        establish_links = TRUE;
    }

    /* From here on, accumulate attribute changes into mods. */
    if (entry->mask & KADM5_LAST_SUCCESS) {
        memset(strval, 0, sizeof(strval));
        if ((strval[0]=getstringtime(entry->last_success)) == NULL)
            goto cleanup;
        if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastSuccessfulAuth", LDAP_MOD_REPLACE, strval)) != 0) {
            free (strval[0]);
            goto cleanup;
        }
        free (strval[0]);
    }

    if (entry->mask & KADM5_LAST_FAILED) {
        memset(strval, 0, sizeof(strval));
        if ((strval[0]=getstringtime(entry->last_failed)) == NULL)
            goto cleanup;
        if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastFailedAuth", LDAP_MOD_REPLACE, strval)) != 0) {
            free (strval[0]);
            goto cleanup;
        }
        free(strval[0]);
    }

    if (entry->mask & KADM5_FAIL_AUTH_COUNT) {
        krb5_kvno fail_auth_count;

        fail_auth_count = entry->fail_auth_count;
        if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT)
            fail_auth_count++;

        st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount",
                                       LDAP_MOD_REPLACE,
                                       fail_auth_count);
        if (st != 0)
            goto cleanup;
    } else if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT) {
        int attr_mask = 0;
        krb5_boolean has_fail_count;

        /* Check if the krbLoginFailedCount attribute exists.  (Through
         * krb5 1.8.1, it wasn't set in new entries.) */
        st = krb5_get_attributes_mask(context, entry, &attr_mask);
        if (st != 0)
            goto cleanup;
        has_fail_count = ((attr_mask & KDB_FAIL_AUTH_COUNT_ATTR) != 0);

        /*
         * If the client library and server supports RFC 4525,
         * then use it to increment by one the value of the
         * krbLoginFailedCount attribute.  Otherwise, assert the
         * (provided) old value by deleting it before adding.
         */
#ifdef LDAP_MOD_INCREMENT
        if (ldap_server_handle->server_info->modify_increment &&
            has_fail_count) {
            st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount",
                                           LDAP_MOD_INCREMENT, 1);
            if (st != 0)
                goto cleanup;
        } else {
#endif /* LDAP_MOD_INCREMENT */
            if (has_fail_count) {
                st = krb5_add_int_mem_ldap_mod(&mods,
                                               "krbLoginFailedCount",
                                               LDAP_MOD_DELETE,
                                               entry->fail_auth_count);
                if (st != 0)
                    goto cleanup;
            }
            st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount",
                                           LDAP_MOD_ADD,
                                           entry->fail_auth_count + 1);
            if (st != 0)
                goto cleanup;
#ifdef LDAP_MOD_INCREMENT
        }
#endif
    } else if (optype == ADD_PRINCIPAL) {
        /* Initialize krbLoginFailedCount in new entries to help avoid a
         * race during the first failed login. */
        st = krb5_add_int_mem_ldap_mod(&mods, "krbLoginFailedCount",
                                       LDAP_MOD_ADD, 0);
    }

    if (entry->mask & KADM5_MAX_LIFE) {
        if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbmaxticketlife", LDAP_MOD_REPLACE, entry->max_life)) != 0)
            goto cleanup;
    }

    if (entry->mask & KADM5_MAX_RLIFE) {
        if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbmaxrenewableage", LDAP_MOD_REPLACE,
                                          entry->max_renewable_life)) != 0)
            goto cleanup;
    }

    if (entry->mask & KADM5_ATTRIBUTES) {
        if ((st=krb5_add_int_mem_ldap_mod(&mods, "krbticketflags", LDAP_MOD_REPLACE,
                                          entry->attributes)) != 0)
            goto cleanup;
    }

    if (entry->mask & KADM5_PRINCIPAL) {
        memset(strval, 0, sizeof(strval));
        strval[0] = user;
        if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbprincipalname", LDAP_MOD_REPLACE, strval)) != 0)
            goto cleanup;
    }

    if (entry->mask & KADM5_PRINC_EXPIRE_TIME) {
        memset(strval, 0, sizeof(strval));
        if ((strval[0]=getstringtime(entry->expiration)) == NULL)
            goto cleanup;
        if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbprincipalexpiration", LDAP_MOD_REPLACE, strval)) != 0) {
            free (strval[0]);
            goto cleanup;
        }
        free (strval[0]);
    }

    if (entry->mask & KADM5_PW_EXPIRATION) {
        memset(strval, 0, sizeof(strval));
        if ((strval[0]=getstringtime(entry->pw_expiration)) == NULL)
            goto cleanup;
        if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpasswordexpiration",
                                          LDAP_MOD_REPLACE,
                                          strval)) != 0) {
            free (strval[0]);
            goto cleanup;
        }
        free (strval[0]);
    }

    /* Both policy and key history live in the KADM data tl_data blob. */
    if (entry->mask & KADM5_POLICY || entry->mask & KADM5_KEY_HIST) {
        memset(&princ_ent, 0, sizeof(princ_ent));
        for (tl_data=entry->tl_data; tl_data; tl_data=tl_data->tl_data_next) {
            if (tl_data->tl_data_type == KRB5_TL_KADM_DATA) {
                if ((st = krb5_lookup_tl_kadm_data(tl_data, &princ_ent)) != 0) {
                    goto cleanup;
                }
                break;
            }
        }
    }

    if (entry->mask & KADM5_POLICY) {
        if (princ_ent.aux_attributes & KADM5_POLICY) {
            memset(strval, 0, sizeof(strval));
            if ((st = krb5_ldap_name_to_policydn (context, princ_ent.policy, &polname)) != 0)
                goto cleanup;
            strval[0] = polname;
            if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_REPLACE, strval)) != 0)
                goto cleanup;
        } else {
            st = EINVAL;
            k5_setmsg(context, st, "Password policy value null");
            goto cleanup;
        }
    } else if (entry->mask & KADM5_LOAD && found_entry == TRUE) {
        /*
         * a load is special in that existing entries must have attrs that
         * removed.
         */
        if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_REPLACE, NULL)) != 0)
            goto cleanup;
    }

    if (entry->mask & KADM5_POLICY_CLR) {
        if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbpwdpolicyreference", LDAP_MOD_DELETE, NULL)) != 0)
            goto cleanup;
    }

    if (entry->mask & KADM5_KEY_HIST) {
        bersecretkey = krb5_encode_histkey(&princ_ent);
        if (bersecretkey == NULL) {
            st = ENOMEM;
            goto cleanup;
        }
        st = krb5_add_ber_mem_ldap_mod(&mods, "krbpwdhistory",
                                       LDAP_MOD_REPLACE | LDAP_MOD_BVALUES,
                                       bersecretkey);
        if (st != 0)
            goto cleanup;
        free_berdata(bersecretkey);
        bersecretkey = NULL;
    }

    if (entry->mask & KADM5_KEY_DATA || entry->mask & KADM5_KVNO) {
        krb5_kvno mkvno;

        if ((st=krb5_dbe_lookup_mkvno(context, entry, &mkvno)) != 0)
            goto cleanup;
        bersecretkey = krb5_encode_krbsecretkey (entry->key_data,
                                                 entry->n_key_data, mkvno);
        if (bersecretkey == NULL) {
            st = ENOMEM;
            goto cleanup;
        }
        /* An empty list of bervals is only accepted for modify operations,
         * not add operations. */
        if (bersecretkey[0] != NULL || !create_standalone) {
            st = krb5_add_ber_mem_ldap_mod(&mods, "krbprincipalkey",
                                           LDAP_MOD_REPLACE | LDAP_MOD_BVALUES,
                                           bersecretkey);
            if (st != 0)
                goto cleanup;
        }

        if (!(entry->mask & KADM5_PRINCIPAL)) {
            memset(strval, 0, sizeof(strval));
            if ((strval[0]=getstringtime(entry->pw_expiration)) == NULL)
                goto cleanup;
            if ((st=krb5_add_str_mem_ldap_mod(&mods,
                                              "krbpasswordexpiration",
                                              LDAP_MOD_REPLACE, strval)) != 0) {
                free (strval[0]);
                goto cleanup;
            }
            free (strval[0]);
        }

        /* Update last password change whenever a new key is set */
        {
            krb5_timestamp last_pw_changed;
            if ((st=krb5_dbe_lookup_last_pwd_change(context, entry,
                                                    &last_pw_changed)) != 0)
                goto cleanup;
            memset(strval, 0, sizeof(strval));
            if ((strval[0] = getstringtime(last_pw_changed)) == NULL)
                goto cleanup;
            if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastPwdChange",
                                              LDAP_MOD_REPLACE, strval)) != 0) {
                free (strval[0]);
                goto cleanup;
            }
            free (strval[0]);
        }
    } /* Modify Key data ends here */

    /* Auth indicators will also be stored in krbExtraData when processing
     * tl_data. */
    st = update_ldap_mod_auth_ind(context, entry, &mods);
    if (st != 0)
        goto cleanup;

    /* Set tl_data */
    if (entry->tl_data != NULL) {
        int count = 0;
        struct berval **ber_tl_data = NULL;
        krb5_tl_data *ptr;
        krb5_timestamp unlock_time;

        /* Count the tl_data elements not mapped to dedicated LDAP
         * attributes; those are stored in krbExtraData. */
        for (ptr = entry->tl_data; ptr != NULL; ptr = ptr->tl_data_next) {
            if (ptr->tl_data_type == KRB5_TL_LAST_PWD_CHANGE
#ifdef SECURID
                || ptr->tl_data_type == KRB5_TL_DB_ARGS
#endif
                || ptr->tl_data_type == KRB5_TL_KADM_DATA
                || ptr->tl_data_type == KDB_TL_USER_INFO
                || ptr->tl_data_type == KRB5_TL_CONSTRAINED_DELEGATION_ACL
                || ptr->tl_data_type == KRB5_TL_LAST_ADMIN_UNLOCK)
                continue;
            count++;
        }
        if (count != 0) {
            int j;
            ber_tl_data = (struct berval **) calloc (count + 1,
                                                     sizeof (struct berval*));
            if (ber_tl_data == NULL) {
                st = ENOMEM;
                goto cleanup;
            }
            for (j = 0, ptr = entry->tl_data; ptr != NULL; ptr = ptr->tl_data_next) {
                /* Ignore tl_data that are stored in separate directory
                 * attributes */
                if (ptr->tl_data_type == KRB5_TL_LAST_PWD_CHANGE
#ifdef SECURID
                    || ptr->tl_data_type == KRB5_TL_DB_ARGS
#endif
                    || ptr->tl_data_type == KRB5_TL_KADM_DATA
                    || ptr->tl_data_type == KDB_TL_USER_INFO
                    || ptr->tl_data_type == KRB5_TL_CONSTRAINED_DELEGATION_ACL
                    || ptr->tl_data_type == KRB5_TL_LAST_ADMIN_UNLOCK)
                    continue;
                if ((st = tl_data2berval (ptr, &ber_tl_data[j])) != 0)
                    break;
                j++;
            }
            if (st == 0) {
                ber_tl_data[count] = NULL;
                st=krb5_add_ber_mem_ldap_mod(&mods, "krbExtraData",
                                             LDAP_MOD_REPLACE |
                                             LDAP_MOD_BVALUES, ber_tl_data);
            }
            free_berdata(ber_tl_data);
            if (st != 0)
                goto cleanup;
        }
        if ((st=krb5_dbe_lookup_last_admin_unlock(context, entry,
                                                  &unlock_time)) != 0)
            goto cleanup;
        if (unlock_time != 0) {
            /* Update last admin unlock */
            memset(strval, 0, sizeof(strval));
            if ((strval[0] = getstringtime(unlock_time)) == NULL)
                goto cleanup;
            if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbLastAdminUnlock",
                                              LDAP_MOD_REPLACE, strval)) != 0) {
                free (strval[0]);
                goto cleanup;
            }
            free (strval[0]);
        }
    }

    /* Directory specific attribute */
    if (xargs.tktpolicydn != NULL) {
        int tmask=0;

        if (strlen(xargs.tktpolicydn) != 0) {
            st = checkattributevalue(ld, xargs.tktpolicydn, "objectclass", policyclass, &tmask);
            CHECK_CLASS_VALIDITY(st, tmask, _("ticket policy object value: "));

            strval[0] = xargs.tktpolicydn;
            strval[1] = NULL;
            if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbticketpolicyreference", LDAP_MOD_REPLACE, strval)) != 0)
                goto cleanup;
        } else {
            /* if xargs.tktpolicydn is a empty string, then delete
             * already existing krbticketpolicyreference attr */
            if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbticketpolicyreference", LDAP_MOD_DELETE, NULL)) != 0)
                goto cleanup;
        }
    }

    if (establish_links == TRUE) {
        memset(strval, 0, sizeof(strval));
        strval[0] = xargs.linkdn;
        if ((st=krb5_add_str_mem_ldap_mod(&mods, "krbObjectReferences", LDAP_MOD_REPLACE, strval)) != 0)
            goto cleanup;
    }

    /*
     * in case mods is NULL then return
     * not sure but can happen in a modprinc
     * so no need to return an error
     * addprinc will at least have the principal name
     * and the keys passed in
     */
    if (mods == NULL)
        goto cleanup;

    if (create_standalone == TRUE) {
        memset(strval, 0, sizeof(strval));
        strval[0] = "krbprincipal";
        strval[1] = "krbprincipalaux";
        strval[2] = "krbTicketPolicyAux";

        if ((st=krb5_add_str_mem_ldap_mod(&mods, "objectclass", LDAP_MOD_ADD, strval)) != 0)
            goto cleanup;

        st = ldap_add_ext_s(ld, standalone_principal_dn, mods, NULL, NULL);
        if (st == LDAP_ALREADY_EXISTS && entry->mask & KADM5_LOAD) {
            /* a load operation must replace an existing entry */
            st = ldap_delete_ext_s(ld, standalone_principal_dn, NULL, NULL);
            if (st != LDAP_SUCCESS) {
                snprintf(errbuf, sizeof(errbuf),
                         _("Principal delete failed (trying to replace "
                           "entry): %s"), ldap_err2string(st));
                st = translate_ldap_error (st, OP_ADD);
                k5_setmsg(context, st, "%s", errbuf);
                goto cleanup;
            } else {
                st = ldap_add_ext_s(ld, standalone_principal_dn, mods, NULL, NULL);
            }
        }
        if (st != LDAP_SUCCESS) {
            snprintf(errbuf, sizeof(errbuf), _("Principal add failed: %s"),
                     ldap_err2string(st));
            st = translate_ldap_error (st, OP_ADD);
            k5_setmsg(context, st, "%s", errbuf);
            goto cleanup;
        }
    } else {
        /*
         * Here existing ldap object is modified and can be related
         * to any attribute, so always ensure that the ldap
         * object is extended with all the kerberos related
         * objectclasses so that there are no constraint
         * violations.
         */
        {
            char *attrvalues[] = {"krbprincipalaux", "krbTicketPolicyAux", NULL};
            int p, q, r=0, amask=0;

            if ((st=checkattributevalue(ld, (xargs.dn) ? xargs.dn : principal_dn,
                                        "objectclass", attrvalues, &amask)) != 0)
                goto cleanup;

            memset(strval, 0, sizeof(strval));
            /* Add only the objectclasses missing from the object
             * (amask has one bit per attrvalues entry). */
            for (p=1, q=0; p<=2; p<<=1, ++q) {
                if ((p & amask) == 0)
                    strval[r++] = attrvalues[q];
            }
            if (r != 0) {
                if ((st=krb5_add_str_mem_ldap_mod(&mods, "objectclass", LDAP_MOD_ADD, strval)) != 0)
                    goto cleanup;
            }
        }
        if (xargs.dn != NULL)
            st=ldap_modify_ext_s(ld, xargs.dn, mods, NULL, NULL);
        else
            st = ldap_modify_ext_s(ld, principal_dn, mods, NULL, NULL);

        if (st != LDAP_SUCCESS) {
            snprintf(errbuf, sizeof(errbuf), _("User modification failed: %s"),
                     ldap_err2string(st));
            st = translate_ldap_error (st, OP_MOD);
            k5_setmsg(context, st, "%s", errbuf);
            goto cleanup;
        }

        if (entry->mask & KADM5_FAIL_AUTH_COUNT_INCREMENT)
            entry->fail_auth_count++;
    }

cleanup:
    if (user)
        free(user);

    if (filtuser)
        free(filtuser);

    free_xargs(xargs);

    if (standalone_principal_dn)
        free(standalone_principal_dn);

    if (principal_dn)
        free (principal_dn);

    if (polname != NULL)
        free(polname);

    for (tre = 0; tre < ntrees; tre++)
        free(subtreelist[tre]);
    free(subtreelist);

    if (subtree)
        free (subtree);

    if (bersecretkey) {
        for (l=0; bersecretkey[l]; ++l) {
            if (bersecretkey[l]->bv_val)
                free (bersecretkey[l]->bv_val);
            free (bersecretkey[l]);
        }
        free (bersecretkey);
    }

    if (keys)
        free (keys);

    ldap_mods_free(mods, 1);
    ldap_osa_free_princ_ent(&princ_ent);
    ldap_msgfree(result);
    krb5_ldap_put_handle_to_pool(ldap_context, ldap_server_handle);
    return(st);
}
/*
 * Fill in ticket-policy-derived fields (max_life, max_renewable_life,
 * ticket flags) of *entries that are not already set on the entry
 * itself, using the named ticket policy object if given, falling back
 * to realm-wide defaults.  omask records which fields the policy object
 * supplied; tktpoldnparam is only dereferenced under omask bits, so it
 * is safe when no policy was read.
 */
krb5_error_code
krb5_read_tkt_policy(krb5_context context, krb5_ldap_context *ldap_context,
                     krb5_db_entry *entries, char *policy)
{
    krb5_error_code st=0;
    int mask=0, omask=0;
    int tkt_mask=(KDB_MAX_LIFE_ATTR | KDB_MAX_RLIFE_ATTR | KDB_TKT_FLAGS_ATTR);
    krb5_ldap_policy_params *tktpoldnparam=NULL;

    if ((st=krb5_get_attributes_mask(context, entries, &mask)) != 0)
        goto cleanup;

    /* If the entry itself has all three attributes, nothing to do. */
    if ((mask & tkt_mask) == tkt_mask)
        goto cleanup;

    if (policy != NULL) {
        st = krb5_ldap_read_policy(context, policy, &tktpoldnparam, &omask);
        if (st && st != KRB5_KDB_NOENTRY) {
            k5_prependmsg(context, st, _("Error reading ticket policy"));
            goto cleanup;
        }
        st = 0; /* reset the return status */
    }

    if ((mask & KDB_MAX_LIFE_ATTR) == 0) {
        if ((omask & KDB_MAX_LIFE_ATTR) ==  KDB_MAX_LIFE_ATTR)
            entries->max_life = tktpoldnparam->maxtktlife;
        else if (ldap_context->lrparams->max_life)
            entries->max_life = ldap_context->lrparams->max_life;
    }

    if ((mask & KDB_MAX_RLIFE_ATTR) == 0) {
        if ((omask & KDB_MAX_RLIFE_ATTR) == KDB_MAX_RLIFE_ATTR)
            entries->max_renewable_life = tktpoldnparam->maxrenewlife;
        else if (ldap_context->lrparams->max_renewable_life)
            entries->max_renewable_life = ldap_context->lrparams->max_renewable_life;
    }

    if ((mask & KDB_TKT_FLAGS_ATTR) == 0) {
        if ((omask & KDB_TKT_FLAGS_ATTR) == KDB_TKT_FLAGS_ATTR)
            entries->attributes = tktpoldnparam->tktflags;
        else if (ldap_context->lrparams->tktflags)
            entries->attributes |= ldap_context->lrparams->tktflags;
    }
    krb5_ldap_free_policy(context, tktpoldnparam);

cleanup:
    return st;
}
/* Release an array of n_keysets decoded keysets, including the key data
 * owned by each element.  A NULL keysets pointer is a no-op. */
static void
free_ldap_seqof_key_data(ldap_seqof_key_data *keysets, krb5_int16 n_keysets)
{
    krb5_int16 idx;
    if (keysets != NULL) {
        for (idx = 0; idx < n_keysets; idx++)
            k5_free_key_data(keysets[idx].n_key_data, keysets[idx].key_data);
        free(keysets);
    }
}
/*
* Decode keys from ldap search results.
*
* Arguments:
 * - bvalues
 *     The ldap search results containing the key data.
 * - keysets_out
 *     The decoded keys as an array of ldap_seqof_key_data structs.  Must be
 *     freed using free_ldap_seqof_key_data.
 * - n_keysets_out
 *     The number of entries in *keysets_out.
 * - total_keys_out
 *     An optional argument that, if given, will be set to the total number
 *     of keys found across all the entries: sum(keysets_out[i].n_key_data).
 *     May be NULL.
*/
/*
 * Decode each non-empty berval into a keyset.  On success, ownership of
 * the keyset array passes to the caller via *keysets_out (free with
 * free_ldap_seqof_key_data); on failure everything allocated here is
 * released and *keysets_out is left NULL.
 */
static krb5_error_code
decode_keys(struct berval **bvalues, ldap_seqof_key_data **keysets_out,
            krb5_int16 *n_keysets_out, krb5_int16 *total_keys_out)
{
    krb5_error_code err = 0;
    krb5_int16 n_keys, i, ki, total_keys;
    ldap_seqof_key_data *keysets = NULL;
    *keysets_out = NULL;
    *n_keysets_out = 0;
    if (total_keys_out)
        *total_keys_out = 0;
    /* Precount the number of non-empty values. */
    for (n_keys = 0, i = 0; bvalues[i] != NULL; i++) {
        if (bvalues[i]->bv_len > 0)
            n_keys++;
    }
    /* k5calloc (calloc-based) returns zeroed memory, so the explicit
     * memset that used to follow was redundant and has been dropped. */
    keysets = k5calloc(n_keys, sizeof(ldap_seqof_key_data), &err);
    if (keysets == NULL)
        goto cleanup;
    for (i = 0, ki = 0, total_keys = 0; bvalues[i] != NULL; i++) {
        krb5_data in;
        if (bvalues[i]->bv_len == 0)
            continue;
        in.length = bvalues[i]->bv_len;
        in.data = bvalues[i]->bv_val;
        err = asn1_decode_sequence_of_keys(&in, &keysets[ki]);
        if (err)
            goto cleanup;
        if (total_keys_out)
            total_keys += keysets[ki].n_key_data;
        ki++;
    }
    if (total_keys_out)
        *total_keys_out = total_keys;
    *n_keysets_out = n_keys;
    *keysets_out = keysets;
    /* Ownership transferred to the caller; suppress the cleanup free. */
    keysets = NULL;
    n_keys = 0;
cleanup:
    free_ldap_seqof_key_data(keysets, n_keys);
    return err;
}
/*
 * Decode the krbPrincipalKey attribute values into a flat key_data list
 * on the principal entry.  *mkvno is set to the master kvno of the first
 * keyset (if any).  On failure the entry is left unmodified.
 */
krb5_error_code
krb5_decode_krbsecretkey(krb5_context context, krb5_db_entry *entries,
                         struct berval **bvalues, krb5_kvno *mkvno)
{
    krb5_key_data *key_data = NULL, *tmp;
    krb5_error_code err = 0;
    ldap_seqof_key_data *keysets = NULL;
    krb5_int16 i, n_keysets = 0, total_keys = 0;
    err = decode_keys(bvalues, &keysets, &n_keysets, &total_keys);
    if (err != 0) {
        k5_prependmsg(context, err,
                      _("unable to decode stored principal key data"));
        goto cleanup;
    }
    /* k5calloc (calloc-based) returns zeroed memory; the explicit memset
     * that used to follow was redundant and has been dropped. */
    key_data = k5calloc(total_keys, sizeof(krb5_key_data), &err);
    if (key_data == NULL)
        goto cleanup;
    if (n_keysets > 0)
        *mkvno = keysets[0].mkvno;
    /* Transfer key data values from keysets to a flat list in entries.
     * Zeroing n_key_data prevents free_ldap_seqof_key_data from freeing
     * the contents we just copied (shallow copy of pointers). */
    tmp = key_data;
    for (i = 0; i < n_keysets; i++) {
        memcpy(tmp, keysets[i].key_data,
               sizeof(krb5_key_data) * keysets[i].n_key_data);
        tmp += keysets[i].n_key_data;
        keysets[i].n_key_data = 0;
    }
    entries->n_key_data = total_keys;
    entries->key_data = key_data;
    key_data = NULL;
cleanup:
    free_ldap_seqof_key_data(keysets, n_keysets);
    k5_free_key_data(total_keys, key_data);
    return err;
}
/* qsort() comparator: order password-history entries by the kvno of
 * their first key; entries with no keys sort as kvno 0. */
static int
compare_osa_pw_hist_ent(const void *left_in, const void *right_in)
{
    const osa_pw_hist_ent *a = (const osa_pw_hist_ent *)left_in;
    const osa_pw_hist_ent *b = (const osa_pw_hist_ent *)right_in;
    int kvno_a = 0, kvno_b = 0;
    if (a->n_key_data)
        kvno_a = a->key_data[0].key_data_kvno;
    if (b->n_key_data)
        kvno_b = b->key_data[0].key_data_kvno;
    return kvno_a - kvno_b;
}
/*
* Decode the key history entries from an LDAP search.
*
* NOTE: the caller must free princ_ent->old_keys even on error.
*/
krb5_error_code
krb5_decode_histkey(krb5_context context, struct berval **bvalues,
                    osa_princ_ent_rec *princ_ent)
{
    krb5_error_code err = 0;
    krb5_int16 i, n_keysets = 0;
    ldap_seqof_key_data *keysets = NULL;
    err = decode_keys(bvalues, &keysets, &n_keysets, NULL);
    if (err != 0) {
        k5_prependmsg(context, err,
                      _("unable to decode stored principal pw history"));
        goto cleanup;
    }
    /* One history entry per keyset; old_keys is freed by the caller even
     * on error (see contract in the comment above this function). */
    princ_ent->old_keys = k5calloc(n_keysets, sizeof(osa_pw_hist_ent), &err);
    if (princ_ent->old_keys == NULL)
        goto cleanup;
    princ_ent->old_key_len = n_keysets;
    if (n_keysets > 0)
        princ_ent->admin_history_kvno = keysets[0].mkvno;
    /* Transfer key data pointers from keysets to princ_ent.  Clearing the
     * keyset fields hands ownership over, so the cleanup below does not
     * free the data now referenced by princ_ent. */
    for (i = 0; i < n_keysets; i++) {
        princ_ent->old_keys[i].n_key_data = keysets[i].n_key_data;
        princ_ent->old_keys[i].key_data = keysets[i].key_data;
        keysets[i].n_key_data = 0;
        keysets[i].key_data = NULL;
    }
    /* Sort the principal entries by kvno in ascending order. */
    qsort(princ_ent->old_keys, princ_ent->old_key_len, sizeof(osa_pw_hist_ent),
          &compare_osa_pw_hist_ent);
    princ_ent->aux_attributes |= KADM5_KEY_HIST;
    /* Set the next key to the end of the list. The queue will be lengthened
     * if it isn't full yet; the first entry will be replaced if it is full. */
    princ_ent->old_key_next = princ_ent->old_key_len;
cleanup:
    free_ldap_seqof_key_data(keysets, n_keysets);
    return err;
}
/*
 * Convert a krb5 timestamp to a newly allocated LDAP generalized-time
 * string ("YYYYmmddHHMMSSZ").  Returns NULL on allocation or conversion
 * failure; the caller must free the result.
 */
static char *
getstringtime(krb5_timestamp epochtime)
{
    struct tm tme;
    char *strtime=NULL;
    time_t posixtime = ts2tt(epochtime);
    strtime = calloc (50, 1);
    if (strtime == NULL)
        return NULL;
    if (gmtime_r(&posixtime, &tme) == NULL) {
        /* Fix: previously the buffer leaked on this error path. */
        free(strtime);
        return NULL;
    }
    strftime(strtime, 50, "%Y%m%d%H%M%SZ", &tme);
    return strtime;
}
/*
* fs/cifs/cifsacl.c
*
* Copyright (C) International Business Machines Corp., 2007,2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Contains the routines for mapping CIFS/NTFS ACLs
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "cifs_debug.h"
/* security id for everyone/world system group */
static const struct cifs_sid sid_everyone = {
1, 1, {0, 0, 0, 0, 0, 1}, {0} };
/* security id for Authenticated Users system group */
static const struct cifs_sid sid_authusers = {
1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
/* group users */
static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
static const struct cred *root_cred;
/*
 * ->instantiate op for "cifs.idmap" keys: store the upcall payload either
 * inline in key->payload.value (when it fits in a pointer-sized slot) or
 * in a kmemdup'd buffer pointed to by key->payload.data.
 *
 * Returns 0 on success or -ENOMEM if the copy cannot be allocated.
 */
static int
cifs_idmap_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
	char *payload;
	/*
	 * If the payload is less than or equal to the size of a pointer, then
	 * an allocation here is wasteful. Just copy the data directly to the
	 * payload.value union member instead.
	 *
	 * With this however, you must check the datalen before trying to
	 * dereference payload.data!
	 */
	if (prep->datalen <= sizeof(key->payload)) {
		/* zero first so short payloads leave no stale tail bytes */
		key->payload.value = 0;
		memcpy(&key->payload.value, prep->data, prep->datalen);
		key->datalen = prep->datalen;
		return 0;
	}
	payload = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
	if (!payload)
		return -ENOMEM;
	key->payload.data = payload;
	key->datalen = prep->datalen;
	return 0;
}
/* ->destroy op: free the out-of-line payload copy, if one was made.
 * Inline payloads (stored in payload.value) need no freeing. */
static inline void
cifs_idmap_key_destroy(struct key *key)
{
	if (key->datalen <= sizeof(key->payload))
		return;
	kfree(key->payload.data);
}
/* Key type used for uid/gid <-> SID mapping upcalls (serviced by the
 * cifs.upcall helper in userspace). */
static struct key_type cifs_idmap_key_type = {
	.name = "cifs.idmap",
	.instantiate = cifs_idmap_key_instantiate,
	.destroy = cifs_idmap_key_destroy,
	/* generic describe op from keys/user-type.h */
	.describe = user_describe,
};
/*
 * Format a SID as the key-description string used for idmap upcalls:
 * "os:S-<rev>-<authority>[-<subauth>...]" for owner SIDs (type SIDOWNER)
 * or "gs:S-..." for group SIDs.  Returns a kmalloc'd string the caller
 * must kfree, or NULL on allocation failure.
 */
static char *
sid_to_key_str(struct cifs_sid *sidptr, unsigned int type)
{
	int i, len;
	unsigned int saval;
	char *sidstr, *strptr;
	unsigned long long id_auth_val;
	/* 3 bytes for prefix; the SID_STRING_* constants bound the worst-case
	 * text for the base fields and each sub-authority, so the sprintf
	 * calls below cannot overrun this buffer. */
	sidstr = kmalloc(3 + SID_STRING_BASE_SIZE +
			 (SID_STRING_SUBAUTH_SIZE * sidptr->num_subauth),
			 GFP_KERNEL);
	if (!sidstr)
		return sidstr;
	strptr = sidstr;
	len = sprintf(strptr, "%cs:S-%hhu", type == SIDOWNER ? 'o' : 'g',
		      sidptr->revision);
	strptr += len;
	/* The authority field is a single 48-bit number */
	id_auth_val = (unsigned long long)sidptr->authority[5];
	id_auth_val |= (unsigned long long)sidptr->authority[4] << 8;
	id_auth_val |= (unsigned long long)sidptr->authority[3] << 16;
	id_auth_val |= (unsigned long long)sidptr->authority[2] << 24;
	id_auth_val |= (unsigned long long)sidptr->authority[1] << 32;
	id_auth_val |= (unsigned long long)sidptr->authority[0] << 48;
	/*
	 * MS-DTYP states that if the authority is >= 2^32, then it should be
	 * expressed as a hex value.
	 */
	if (id_auth_val <= UINT_MAX)
		len = sprintf(strptr, "-%llu", id_auth_val);
	else
		len = sprintf(strptr, "-0x%llx", id_auth_val);
	strptr += len;
	for (i = 0; i < sidptr->num_subauth; ++i) {
		saval = le32_to_cpu(sidptr->sub_auth[i]);
		len = sprintf(strptr, "-%u", saval);
		strptr += len;
	}
	return sidstr;
}
/*
 * Compare two SIDs (roughly a UUID for a user or group).  Returns 0 when
 * they match; otherwise gives an ordering (1 if ctsid sorts after cwsid,
 * -1 if before) by revision, then the six authority bytes, then the
 * common prefix of sub-authorities compared in CPU byte order.  A NULL
 * argument compares as a mismatch (returns 1).
 */
static int
compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
{
	int i, num;
	if (!ctsid || !cwsid)
		return 1;
	/* revision first */
	if (ctsid->revision != cwsid->revision)
		return ctsid->revision > cwsid->revision ? 1 : -1;
	/* then the six identifier-authority bytes */
	for (i = 0; i < NUM_AUTHS; ++i) {
		if (ctsid->authority[i] != cwsid->authority[i])
			return ctsid->authority[i] > cwsid->authority[i] ?
				1 : -1;
	}
	/* then the shared prefix of sub-authority values, if any */
	num = ctsid->num_subauth < cwsid->num_subauth ?
		ctsid->num_subauth : cwsid->num_subauth;
	for (i = 0; i < num; ++i) {
		if (ctsid->sub_auth[i] != cwsid->sub_auth[i])
			return le32_to_cpu(ctsid->sub_auth[i]) >
			       le32_to_cpu(cwsid->sub_auth[i]) ? 1 : -1;
	}
	return 0; /* sids compare/match */
}
/* Copy a SID, clamping the sub-authority count to the most we can store. */
static void
cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
{
	int idx;
	dst->revision = src->revision;
	dst->num_subauth = min_t(u8, src->num_subauth, SID_MAX_SUB_AUTHORITIES);
	memcpy(dst->authority, src->authority, NUM_AUTHS);
	for (idx = 0; idx < dst->num_subauth; ++idx)
		dst->sub_auth[idx] = src->sub_auth[idx];
}
/*
 * Map a uid or gid (per sidtype SIDOWNER/SIDGROUP) to a SID via a
 * request_key() upcall on the "cifs.idmap" key type, run under the
 * module's root credentials so the dedicated cache keyring is used.
 *
 * On success copies the SID into *ssid and returns 0.  An unmappable id
 * returns -EINVAL; a malformed downcall payload invalidates the cached
 * key and returns -EIO.
 */
static int
id_to_sid(unsigned int cid, uint sidtype, struct cifs_sid *ssid)
{
	int rc;
	struct key *sidkey;
	struct cifs_sid *ksid;
	unsigned int ksid_size;
	char desc[3 + 10 + 1]; /* 3 byte prefix + 10 bytes for value + NULL */
	const struct cred *saved_cred;
	rc = snprintf(desc, sizeof(desc), "%ci:%u",
		      sidtype == SIDOWNER ? 'o' : 'g', cid);
	if (rc >= sizeof(desc))
		return -EINVAL;
	rc = 0;
	/* run the upcall with root creds so its thread keyring caches it */
	saved_cred = override_creds(root_cred);
	sidkey = request_key(&cifs_idmap_key_type, desc, "");
	if (IS_ERR(sidkey)) {
		rc = -EINVAL;
		cifs_dbg(FYI, "%s: Can't map %cid %u to a SID\n",
			 __func__, sidtype == SIDOWNER ? 'u' : 'g', cid);
		goto out_revert_creds;
	} else if (sidkey->datalen < CIFS_SID_BASE_SIZE) {
		rc = -EIO;
		cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu)\n",
			 __func__, sidkey->datalen);
		goto invalidate_key;
	}
	/*
	 * A sid is usually too large to be embedded in payload.value, but if
	 * there are no subauthorities and the host has 8-byte pointers, then
	 * it could be.
	 */
	ksid = sidkey->datalen <= sizeof(sidkey->payload) ?
		(struct cifs_sid *)&sidkey->payload.value :
		(struct cifs_sid *)sidkey->payload.data;
	/* reject payloads too short for the claimed sub-authority count */
	ksid_size = CIFS_SID_BASE_SIZE + (ksid->num_subauth * sizeof(__le32));
	if (ksid_size > sidkey->datalen) {
		rc = -EIO;
		cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu, ksid_size=%u)\n",
			 __func__, sidkey->datalen, ksid_size);
		goto invalidate_key;
	}
	cifs_copy_sid(ssid, ksid);
out_key_put:
	key_put(sidkey);
out_revert_creds:
	revert_creds(saved_cred);
	return rc;
invalidate_key:
	/* drop the bad key from the cache, then release it as usual */
	key_invalidate(sidkey);
	goto out_key_put;
}
/*
 * Map a SID to a uid or gid (per sidtype) via a "cifs.idmap" upcall and
 * store the result in fattr->cf_uid or fattr->cf_gid.
 *
 * Always returns 0: if the mapping fails for any reason, the mount's
 * default mnt_uid/mnt_gid are used instead (see note near the end).
 * The only hard failures are -EIO for an over-long SID and -ENOMEM when
 * the key description cannot be allocated.
 */
static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
	  struct cifs_fattr *fattr, uint sidtype)
{
	int rc;
	struct key *sidkey;
	char *sidstr;
	const struct cred *saved_cred;
	kuid_t fuid = cifs_sb->mnt_uid;
	kgid_t fgid = cifs_sb->mnt_gid;
	/*
	 * If we have too many subauthorities, then something is really wrong.
	 * Just return an error.
	 */
	if (unlikely(psid->num_subauth > SID_MAX_SUB_AUTHORITIES)) {
		cifs_dbg(FYI, "%s: %u subauthorities is too many!\n",
			 __func__, psid->num_subauth);
		return -EIO;
	}
	sidstr = sid_to_key_str(psid, sidtype);
	if (!sidstr)
		return -ENOMEM;
	/* run the upcall with root creds so its thread keyring caches it */
	saved_cred = override_creds(root_cred);
	sidkey = request_key(&cifs_idmap_key_type, sidstr, "");
	if (IS_ERR(sidkey)) {
		rc = -EINVAL;
		cifs_dbg(FYI, "%s: Can't map SID %s to a %cid\n",
			 __func__, sidstr, sidtype == SIDOWNER ? 'u' : 'g');
		goto out_revert_creds;
	}
	/*
	 * FIXME: Here we assume that uid_t and gid_t are same size. It's
	 * probably a safe assumption but might be better to check based on
	 * sidtype.
	 */
	BUILD_BUG_ON(sizeof(uid_t) != sizeof(gid_t));
	if (sidkey->datalen != sizeof(uid_t)) {
		rc = -EIO;
		cifs_dbg(FYI, "%s: Downcall contained malformed key (datalen=%hu)\n",
			 __func__, sidkey->datalen);
		key_invalidate(sidkey);
		goto out_key_put;
	}
	/* the id fits inline in payload.value (see key_instantiate above) */
	if (sidtype == SIDOWNER) {
		kuid_t uid;
		uid_t id;
		memcpy(&id, &sidkey->payload.value, sizeof(uid_t));
		uid = make_kuid(&init_user_ns, id);
		if (uid_valid(uid))
			fuid = uid;
	} else {
		kgid_t gid;
		gid_t id;
		memcpy(&id, &sidkey->payload.value, sizeof(gid_t));
		gid = make_kgid(&init_user_ns, id);
		if (gid_valid(gid))
			fgid = gid;
	}
out_key_put:
	key_put(sidkey);
out_revert_creds:
	revert_creds(saved_cred);
	kfree(sidstr);
	/*
	 * Note that we return 0 here unconditionally. If the mapping
	 * fails then we just fall back to using the mnt_uid/mnt_gid.
	 */
	if (sidtype == SIDOWNER)
		fattr->cf_uid = fuid;
	else
		fattr->cf_gid = fgid;
	return 0;
}
/*
 * Module-init helper: register the "cifs.idmap" key type and create the
 * root credential set + keyring that idmap upcalls run under and cache
 * their results in.  Returns 0 on success or a negative errno.
 */
int
init_cifs_idmap(void)
{
	struct cred *cred;
	struct key *keyring;
	int ret;
	cifs_dbg(FYI, "Registering the %s key type\n",
		 cifs_idmap_key_type.name);
	/* create an override credential set with a special thread keyring in
	 * which requests are cached
	 *
	 * this is used to prevent malicious redirections from being installed
	 * with add_key().
	 */
	cred = prepare_kernel_cred(NULL);
	if (!cred)
		return -ENOMEM;
	keyring = keyring_alloc(".cifs_idmap",
				GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred,
				(KEY_POS_ALL & ~KEY_POS_SETATTR) |
				KEY_USR_VIEW | KEY_USR_READ,
				KEY_ALLOC_NOT_IN_QUOTA, NULL);
	if (IS_ERR(keyring)) {
		ret = PTR_ERR(keyring);
		goto failed_put_cred;
	}
	ret = register_key_type(&cifs_idmap_key_type);
	if (ret < 0)
		goto failed_put_key;
	/* instruct request_key() to use this special keyring as a cache for
	 * the results it looks up */
	set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
	cred->thread_keyring = keyring;
	cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
	/* published for use by id_to_sid()/sid_to_id() override_creds() */
	root_cred = cred;
	cifs_dbg(FYI, "cifs idmap keyring: %d\n", key_serial(keyring));
	return 0;
failed_put_key:
	key_put(keyring);
failed_put_cred:
	put_cred(cred);
	return ret;
}
/*
 * Module-exit counterpart of init_cifs_idmap(): revoke the cache
 * keyring, unregister the key type, then drop the root credentials
 * (which also releases the keyring reference held by the cred).
 */
void
exit_cifs_idmap(void)
{
	key_revoke(root_cred->thread_keyring);
	unregister_key_type(&cifs_idmap_key_type);
	put_cred(root_cred);
	cifs_dbg(FYI, "Unregistered %s key type\n", cifs_idmap_key_type.name);
}
/*
 * Copy the security-descriptor header plus the owner and group SIDs from
 * pntsd into pnntsd, placing the two SIDs back-to-back starting at
 * sidsoffset and pointing the new descriptor's offsets at them.  The SACL
 * offset is cleared; the caller fills in the DACL separately.
 */
static void copy_sec_desc(const struct cifs_ntsd *pntsd,
				struct cifs_ntsd *pnntsd, __u32 sidsoffset)
{
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
	/* copy security descriptor control portion */
	pnntsd->revision = pntsd->revision;
	pnntsd->type = pntsd->type;
	/* new DACL always immediately follows the fixed header */
	pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
	pnntsd->sacloffset = 0;
	pnntsd->osidoffset = cpu_to_le32(sidsoffset);
	pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
	/* copy owner sid */
	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
	cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
	/* copy group sid */
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
	ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
					sizeof(struct cifs_sid));
	cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
	return;
}
/*
   change posix mode to reflect permissions
   pmode is the existing mode (we only want to overwrite part of this
   bits to set can be: S_IRWXU, S_IRWXG or S_IRWXO ie 00700 or 00070 or 00007

   ace_flags     - access mask of the ACE (little-endian, as on the wire)
   type          - ACE type: ACCESS_ALLOWED or ACCESS_DENIED
   pmode         - mode bits accumulated so far, updated in place
   pbits_to_set  - mask of bits still allowed to be granted; DENY ACEs
                   clear bits here so later ALLOW ACEs cannot set them
*/
static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
				 umode_t *pbits_to_set)
{
	__u32 flags = le32_to_cpu(ace_flags);
	/* the order of ACEs is important. The canonical order is to begin with
	   DENY entries followed by ALLOW, otherwise an allow entry could be
	   encountered first, making the subsequent deny entry like "dead code"
	   which would be superfluous since Windows stops when a match is made
	   for the operation you are trying to perform for your user */
	/* For deny ACEs we change the mask so that subsequent allow access
	   control entries do not turn on the bits we are denying */
	if (type == ACCESS_DENIED) {
		if (flags & GENERIC_ALL)
			*pbits_to_set &= ~S_IRWXUGO;
		if ((flags & GENERIC_WRITE) ||
			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
			*pbits_to_set &= ~S_IWUGO;
		if ((flags & GENERIC_READ) ||
			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
			*pbits_to_set &= ~S_IRUGO;
		if ((flags & GENERIC_EXECUTE) ||
			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
			*pbits_to_set &= ~S_IXUGO;
		return;
	} else if (type != ACCESS_ALLOWED) {
		cifs_dbg(VFS, "unknown access control type %d\n", type);
		return;
	}
	/* else ACCESS_ALLOWED type */
	if (flags & GENERIC_ALL) {
		*pmode |= (S_IRWXUGO & (*pbits_to_set));
		cifs_dbg(NOISY, "all perms\n");
		return;
	}
	if ((flags & GENERIC_WRITE) ||
			((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
		*pmode |= (S_IWUGO & (*pbits_to_set));
	if ((flags & GENERIC_READ) ||
			((flags & FILE_READ_RIGHTS) == FILE_READ_RIGHTS))
		*pmode |= (S_IRUGO & (*pbits_to_set));
	if ((flags & GENERIC_EXECUTE) ||
			((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
		*pmode |= (S_IXUGO & (*pbits_to_set));
	cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode);
	return;
}
/*
 * Translate one class of POSIX mode bits (user, group, or other, as
 * selected by bits_to_use) into the equivalent CIFS ACE access flags.
 * Called once per ACE whose SID matched owner, group, or everyone.
 */
static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
				__u32 *pace_flags)
{
	__u32 flags = 0;
	/* only consider the r/w/x bits of the selected class */
	mode &= bits_to_use;
	if (mode & S_IRUGO)
		flags |= SET_FILE_READ_RIGHTS;
	if (mode & S_IWUGO)
		flags |= SET_FILE_WRITE_RIGHTS;
	if (mode & S_IXUGO)
		flags |= SET_FILE_EXEC_RIGHTS;
	*pace_flags = flags;
	cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n",
		 mode, *pace_flags);
	return;
}
/*
 * Fill in one ACCESS_ALLOWED ACE for the given SID, deriving the access
 * mask from the selected class of mode bits (bits is S_IRWXU/S_IRWXG/
 * S_IRWXO).  Returns the on-wire size of the ACE written.
 */
static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
			const struct cifs_sid *psid, __u64 nmode, umode_t bits)
{
	int i;
	__u16 size = 0;
	__u32 access_req = 0;
	pntace->type = ACCESS_ALLOWED;
	pntace->flags = 0x0;
	mode_to_access_flags(nmode, bits, &access_req);
	/* never emit a zero access mask */
	if (!access_req)
		access_req = SET_MINIMUM_RIGHTS;
	pntace->access_req = cpu_to_le32(access_req);
	pntace->sid.revision = psid->revision;
	pntace->sid.num_subauth = psid->num_subauth;
	for (i = 0; i < NUM_AUTHS; i++)
		pntace->sid.authority[i] = psid->authority[i];
	for (i = 0; i < psid->num_subauth; i++)
		pntace->sid.sub_auth[i] = psid->sub_auth[i];
	/* on-wire ACE size: type(1) + flags(1) + size(2) + access_req(4)
	 * + SID: revision(1) + num_subauth(1) + authority(6) + 4/subauth */
	size = 1 + 1 + 2 + 4 + 1 + 1 + 6 + (psid->num_subauth * 4);
	pntace->size = cpu_to_le16(size);
	return size;
}
#ifdef CONFIG_CIFS_DEBUG2
/* Debug-only: log the fields of one ACE, after sanity-checking that its
 * claimed size is plausible and lies within the ACL buffer. */
static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
{
	int num_subauth;
	/* validate that we do not go past end of acl */
	/* 16 bytes = smallest possible ACE header + SID with no sub-auths */
	if (le16_to_cpu(pace->size) < 16) {
		cifs_dbg(VFS, "ACE too small %d\n", le16_to_cpu(pace->size));
		return;
	}
	if (end_of_acl < (char *)pace + le16_to_cpu(pace->size)) {
		cifs_dbg(VFS, "ACL too small to parse ACE\n");
		return;
	}
	num_subauth = pace->sid.num_subauth;
	if (num_subauth) {
		int i;
		cifs_dbg(FYI, "ACE revision %d num_auth %d type %d flags %d size %d\n",
			 pace->sid.revision, pace->sid.num_subauth, pace->type,
			 pace->flags, le16_to_cpu(pace->size));
		for (i = 0; i < num_subauth; ++i) {
			cifs_dbg(FYI, "ACE sub_auth[%d]: 0x%x\n",
				 i, le32_to_cpu(pace->sid.sub_auth[i]));
		}
		/* BB add length check to make sure that we do not have huge
		   num auths and therefore go off the end */
	}
	return;
}
#endif
/*
 * Walk the DACL of a security descriptor (untrusted, server-supplied
 * data) and derive POSIX mode bits into fattr->cf_mode from ACEs whose
 * SIDs match the owner, group, everyone, or authenticated-users SIDs.
 * A missing DACL grants rwx to all; an empty DACL grants nothing.
 */
static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
		       struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
		       struct cifs_fattr *fattr)
{
	int i;
	int num_aces = 0;
	int acl_size;
	char *acl_base;
	struct cifs_ace **ppace;
	/* BB need to add parm so we can store the SID BB */
	if (!pdacl) {
		/* no DACL in the security descriptor, set
		   all the permissions for user/group/other */
		fattr->cf_mode |= S_IRWXUGO;
		return;
	}
	/* validate that we do not go past end of acl */
	if (end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
		cifs_dbg(VFS, "ACL too small to parse DACL\n");
		return;
	}
	cifs_dbg(NOISY, "DACL revision %d size %d num aces %d\n",
		 le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
		 le32_to_cpu(pdacl->num_aces));
	/* reset rwx permissions for user/group/other.
	   Also, if num_aces is 0 i.e. DACL has no ACEs,
	   user/group/other have no permissions */
	fattr->cf_mode &= ~(S_IRWXUGO);
	acl_base = (char *)pdacl;
	acl_size = sizeof(struct cifs_acl);
	num_aces = le32_to_cpu(pdacl->num_aces);
	if (num_aces > 0) {
		umode_t user_mask = S_IRWXU;
		umode_t group_mask = S_IRWXG;
		umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
		/* guard the multiplication in the allocation below */
		if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
			return;
		ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
				GFP_KERNEL);
		if (!ppace)
			return;
		for (i = 0; i < num_aces; ++i) {
			ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
			/*
			 * Fix: bound each ACE against the ACL buffer before
			 * dereferencing it.  The per-ACE size fields are
			 * server-controlled and may walk past end_of_acl even
			 * when the DACL's own size passed the check above.
			 * First ensure the fixed header (through the __le16
			 * size field at offset 2) is readable, then the whole
			 * claimed ACE.
			 */
			if ((char *)ppace[i] + 4 > end_of_acl ||
			    (char *)ppace[i] + le16_to_cpu(ppace[i]->size) >
			    end_of_acl) {
				cifs_dbg(VFS, "ACE overflows DACL buffer\n");
				break;
			}
#ifdef CONFIG_CIFS_DEBUG2
			dump_ace(ppace[i], end_of_acl);
#endif
			if (compare_sids(&(ppace[i]->sid), pownersid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &user_mask);
			if (compare_sids(&(ppace[i]->sid), pgrpsid) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &group_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_everyone) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);
			if (compare_sids(&(ppace[i]->sid), &sid_authusers) == 0)
				access_flags_to_mode(ppace[i]->access_req,
						     ppace[i]->type,
						     &fattr->cf_mode,
						     &other_mask);
			/* memcpy((void *)(&(cifscred->aces[i])),
				(void *)ppace[i],
				sizeof(struct cifs_ace)); */
			/* advance past this ACE using its on-wire size */
			acl_base = (char *)ppace[i];
			acl_size = le16_to_cpu(ppace[i]->size);
		}
		kfree(ppace);
	}
	return;
}
/*
 * Build a three-ACE DACL (owner, group, everyone) after pndacl's header,
 * translating nmode's user/group/other bits into each ACE's access mask.
 * Fills in pndacl->size and num_aces; the caller sets revision.
 * Always returns 0.
 */
static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
			struct cifs_sid *pgrpsid, __u64 nmode)
{
	u16 size = 0;
	struct cifs_acl *pnndacl;
	/* ACEs start immediately after the ACL header */
	pnndacl = (struct cifs_acl *)((char *)pndacl + sizeof(struct cifs_acl));
	size += fill_ace_for_sid((struct cifs_ace *) ((char *)pnndacl + size),
				 pownersid, nmode, S_IRWXU);
	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
				 pgrpsid, nmode, S_IRWXG);
	size += fill_ace_for_sid((struct cifs_ace *)((char *)pnndacl + size),
				 &sid_everyone, nmode, S_IRWXO);
	pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
	pndacl->num_aces = cpu_to_le32(3);
	return 0;
}
/*
 * Minimal validation that a SID lies inside the ACL buffer; optionally
 * dumps its fields when CONFIG_CIFS_DEBUG2 is set.  Returns 0 if the
 * fixed portion fits, -EINVAL otherwise.
 */
static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
{
	/* BB need to add parm so we can store the SID BB */
	/* validate that we do not go past end of ACL - sid must be at least 8
	   bytes long (assuming no sub-auths - e.g. the null SID */
	if (end_of_acl < (char *)psid + 8) {
		cifs_dbg(VFS, "ACL too small to parse SID %p\n", psid);
		return -EINVAL;
	}
#ifdef CONFIG_CIFS_DEBUG2
	if (psid->num_subauth) {
		int i;
		cifs_dbg(FYI, "SID revision %d num_auth %d\n",
			 psid->revision, psid->num_subauth);
		for (i = 0; i < psid->num_subauth; i++) {
			cifs_dbg(FYI, "SID sub_auth[%d]: 0x%x\n",
				 i, le32_to_cpu(psid->sub_auth[i]));
		}
		/* BB add length check to make sure that we do not have huge
		   num auths and therefore go off the end */
		/* the last sub-auth is the RID (user/group id) */
		cifs_dbg(FYI, "RID 0x%x\n",
			 le32_to_cpu(psid->sub_auth[psid->num_subauth-1]));
	}
#endif
	return 0;
}
/* Convert CIFS ACL to POSIX form */
/*
 * Parse a security descriptor: validate and map the owner and group SIDs
 * to uid/gid on fattr, then derive mode bits from the DACL (if present).
 * acl_len bounds all offset arithmetic.  Returns 0 or a negative errno.
 */
static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
		struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr)
{
	int rc = 0;
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
	char *end_of_acl = ((char *)pntsd) + acl_len;
	__u32 dacloffset;
	if (pntsd == NULL)
		return -EIO;
	owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
	group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
	dacloffset = le32_to_cpu(pntsd->dacloffset);
	/* dacl_ptr is only dereferenced below when dacloffset != 0 */
	dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
	cifs_dbg(NOISY, "revision %d type 0x%x ooffset 0x%x goffset 0x%x sacloffset 0x%x dacloffset 0x%x\n",
		 pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
		 le32_to_cpu(pntsd->gsidoffset),
		 le32_to_cpu(pntsd->sacloffset), dacloffset);
	/* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
	rc = parse_sid(owner_sid_ptr, end_of_acl);
	if (rc) {
		cifs_dbg(FYI, "%s: Error %d parsing Owner SID\n", __func__, rc);
		return rc;
	}
	rc = sid_to_id(cifs_sb, owner_sid_ptr, fattr, SIDOWNER);
	if (rc) {
		cifs_dbg(FYI, "%s: Error %d mapping Owner SID to uid\n",
			 __func__, rc);
		return rc;
	}
	rc = parse_sid(group_sid_ptr, end_of_acl);
	if (rc) {
		/* Fix: this message previously read "mapping Owner SID to
		 * gid" although it reports a Group SID *parse* failure. */
		cifs_dbg(FYI, "%s: Error %d parsing Group SID\n",
			 __func__, rc);
		return rc;
	}
	rc = sid_to_id(cifs_sb, group_sid_ptr, fattr, SIDGROUP);
	if (rc) {
		cifs_dbg(FYI, "%s: Error %d mapping Group SID to gid\n",
			 __func__, rc);
		return rc;
	}
	if (dacloffset)
		parse_dacl(dacl_ptr, end_of_acl, owner_sid_ptr,
			   group_sid_ptr, fattr);
	else
		cifs_dbg(FYI, "no ACL\n"); /* BB grant all or default perms? */
	return rc;
}
/* Convert permission bits from mode to equivalent CIFS ACL */
/*
 * Build the security descriptor to send to the server.  Two modes:
 *  - chmod (nmode != NO_CHANGE_64): synthesize a fresh 3-ACE DACL from
 *    nmode and copy the header/SIDs from the existing descriptor pntsd.
 *  - chown/chgrp (uid and/or gid valid): copy pntsd wholesale and
 *    overwrite the owner and/or group SID via idmap upcalls.
 * *aclflag is set to tell the caller which SMB set-info flavor to use.
 * Returns 0 or a negative errno.
 */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
	__u32 secdesclen, __u64 nmode, kuid_t uid, kgid_t gid, int *aclflag)
{
	int rc = 0;
	__u32 dacloffset;
	__u32 ndacloffset;
	__u32 sidsoffset;
	struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
	struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
	struct cifs_acl *dacl_ptr = NULL;  /* no need for SACL ptr */
	struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
	if (nmode != NO_CHANGE_64) { /* chmod */
		owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->osidoffset));
		group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
				le32_to_cpu(pntsd->gsidoffset));
		dacloffset = le32_to_cpu(pntsd->dacloffset);
		dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
		/* new DACL goes right after the fixed header */
		ndacloffset = sizeof(struct cifs_ntsd);
		ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
		ndacl_ptr->revision = dacl_ptr->revision;
		ndacl_ptr->size = 0;
		ndacl_ptr->num_aces = 0;
		rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
				    nmode);
		/* SIDs are placed immediately after the new DACL */
		sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
		/* copy sec desc control portion & owner and group sids */
		copy_sec_desc(pntsd, pnntsd, sidsoffset);
		*aclflag = CIFS_ACL_DACL;
	} else {
		memcpy(pnntsd, pntsd, secdesclen);
		if (uid_valid(uid)) { /* chown */
			uid_t id;
			owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
					le32_to_cpu(pnntsd->osidoffset));
			nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
						 GFP_KERNEL);
			if (!nowner_sid_ptr)
				return -ENOMEM;
			id = from_kuid(&init_user_ns, uid);
			rc = id_to_sid(id, SIDOWNER, nowner_sid_ptr);
			if (rc) {
				cifs_dbg(FYI, "%s: Mapping error %d for owner id %d\n",
					 __func__, rc, id);
				kfree(nowner_sid_ptr);
				return rc;
			}
			cifs_copy_sid(owner_sid_ptr, nowner_sid_ptr);
			kfree(nowner_sid_ptr);
			*aclflag = CIFS_ACL_OWNER;
		}
		if (gid_valid(gid)) { /* chgrp */
			gid_t id;
			group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
					le32_to_cpu(pnntsd->gsidoffset));
			ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
						 GFP_KERNEL);
			if (!ngroup_sid_ptr)
				return -ENOMEM;
			id = from_kgid(&init_user_ns, gid);
			rc = id_to_sid(id, SIDGROUP, ngroup_sid_ptr);
			if (rc) {
				cifs_dbg(FYI, "%s: Mapping error %d for group id %d\n",
					 __func__, rc, id);
				kfree(ngroup_sid_ptr);
				return rc;
			}
			cifs_copy_sid(group_sid_ptr, ngroup_sid_ptr);
			kfree(ngroup_sid_ptr);
			*aclflag = CIFS_ACL_GROUP;
		}
	}
	return rc;
}
/*
 * Fetch a file's security descriptor using an already-open handle.
 * Returns the kmalloc'd descriptor (caller frees) with *pacllen set to
 * its length, or an ERR_PTR on failure.
 */
struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
		const struct cifs_fid *cifsfid, u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	unsigned int xid;
	int rc;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink))
		return ERR_CAST(tlink);
	xid = get_xid();
	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd,
				pacllen);
	free_xid(xid);
	cifs_put_tlink(tlink);
	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}
/*
 * Fetch a file's security descriptor by path: open the file with
 * READ_CONTROL access, query the ACL, and close it again.  Returns the
 * kmalloc'd descriptor (caller frees) with *pacllen set, or an ERR_PTR.
 */
static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
		const char *path, u32 *pacllen)
{
	struct cifs_ntsd *pntsd = NULL;
	int oplock = 0;
	unsigned int xid;
	int rc, create_options = 0;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	if (IS_ERR(tlink))
		return ERR_CAST(tlink);
	tcon = tlink_tcon(tlink);
	xid = get_xid();
	/* honor the mount's backup-intent option */
	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;
	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = READ_CONTROL;
	oparms.create_options = create_options;
	oparms.disposition = FILE_OPEN;
	oparms.path = path;
	oparms.fid = &fid;
	oparms.reconnect = false;
	rc = CIFS_open(xid, &oparms, &oplock, NULL);
	if (!rc) {
		rc = CIFSSMBGetCIFSACL(xid, tcon, fid.netfid, &pntsd, pacllen);
		CIFSSMBClose(xid, tcon, fid.netfid);
	}
	cifs_put_tlink(tlink);
	free_xid(xid);
	cifs_dbg(FYI, "%s: rc = %d ACL len %d\n", __func__, rc, *pacllen);
	if (rc)
		return ERR_PTR(rc);
	return pntsd;
}
/* Retrieve an ACL from the server */
/*
 * Prefer an already-open readable handle when one exists; otherwise fall
 * back to a path-based open.  Caller frees the returned descriptor.
 */
struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
				      struct inode *inode, const char *path,
				      u32 *pacllen)
{
	struct cifsFileInfo *open_file;
	struct cifs_ntsd *ntsd;
	open_file = inode ? find_readable_file(CIFS_I(inode), true) : NULL;
	if (open_file) {
		ntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
		cifsFileInfo_put(open_file);
	} else {
		ntsd = get_cifs_acl_by_path(cifs_sb, path, pacllen);
	}
	return ntsd;
}
/* Set an ACL on the server */
/*
 * Open the file with the access right matching aclflag (WRITE_OWNER for
 * owner/group changes, WRITE_DAC for DACL changes), push the descriptor,
 * and close.  Returns 0 or a negative errno.
 */
int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
			struct inode *inode, const char *path, int aclflag)
{
	int oplock = 0;
	unsigned int xid;
	int rc, access_flags, create_options = 0;
	struct cifs_tcon *tcon;
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
	struct cifs_fid fid;
	struct cifs_open_parms oparms;
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);
	xid = get_xid();
	/* honor the mount's backup-intent option */
	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;
	if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
		access_flags = WRITE_OWNER;
	else
		access_flags = WRITE_DAC;
	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = access_flags;
	oparms.create_options = create_options;
	oparms.disposition = FILE_OPEN;
	oparms.path = path;
	oparms.fid = &fid;
	oparms.reconnect = false;
	rc = CIFS_open(xid, &oparms, &oplock, NULL);
	if (rc) {
		cifs_dbg(VFS, "Unable to open file to set ACL\n");
		goto out;
	}
	rc = CIFSSMBSetCIFSACL(xid, tcon, fid.netfid, pnntsd, acllen, aclflag);
	cifs_dbg(NOISY, "SetCIFSACL rc = %d\n", rc);
	CIFSSMBClose(xid, tcon, fid.netfid);
out:
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
/*
 * Fetch a file's security descriptor (via an open fid when provided and
 * supported, else by path) and fill fattr's uid/gid/mode from it.
 * Returns 0, -EOPNOTSUPP if the server ops lack ACL support, or the
 * fetch/parse error.
 */
int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
		  struct inode *inode, const char *path,
		  const struct cifs_fid *pfid)
{
	struct cifs_ntsd *pntsd = NULL;
	u32 acllen = 0;
	int rc = 0;
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
	struct cifs_tcon *tcon;
	cifs_dbg(NOISY, "converting ACL to mode for %s\n", path);
	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);
	if (pfid && (tcon->ses->server->ops->get_acl_by_fid))
		pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid,
							       &acllen);
	else if (tcon->ses->server->ops->get_acl)
		pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
							&acllen);
	else {
		cifs_put_tlink(tlink);
		return -EOPNOTSUPP;
	}
	/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
	if (IS_ERR(pntsd)) {
		rc = PTR_ERR(pntsd);
		cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
	} else {
		rc = parse_sec_desc(cifs_sb, pntsd, acllen, fattr);
		kfree(pntsd);
		if (rc)
			cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc);
	}
	cifs_put_tlink(tlink);
	return rc;
}
/*
 * Convert mode bits to an ACL so we can update the ACL on the server.
 *
 * Reads the current security descriptor, rebuilds it from @nmode/@uid/@gid
 * with build_sec_desc(), and pushes the result back with the server's
 * set_acl op.  build_sec_desc() also decides which part of the descriptor
 * changed and records it in @aclflag for the set call.
 *
 * Returns 0 on success or a negative errno.
 */
int
id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
			kuid_t uid, kgid_t gid)
{
	int rc = 0;
	int aclflag = CIFS_ACL_DACL; /* default flag to set */
	__u32 secdesclen = 0;
	struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
	struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
	struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
	struct cifs_tcon *tcon;

	if (IS_ERR(tlink))
		return PTR_ERR(tlink);
	tcon = tlink_tcon(tlink);

	cifs_dbg(NOISY, "set ACL from mode for %s\n", path);

	/* Get the security descriptor */
	if (tcon->ses->server->ops->get_acl == NULL) {
		cifs_put_tlink(tlink);
		return -EOPNOTSUPP;
	}

	pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
						&secdesclen);
	if (IS_ERR(pntsd)) {
		rc = PTR_ERR(pntsd);
		cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
		cifs_put_tlink(tlink);
		return rc;
	}

	/*
	 * Add three ACEs for owner, group, everyone getting rid of other ACEs
	 * as chmod disables ACEs and set the security descriptor. Allocate
	 * memory for the smb header, set security descriptor request security
	 * descriptor parameters, and security descriptor itself.
	 */
	/* never allocate less than the default size for the new descriptor */
	secdesclen = max_t(u32, secdesclen, DEFAULT_SEC_DESC_LEN);
	pnntsd = kmalloc(secdesclen, GFP_KERNEL);
	if (!pnntsd) {
		kfree(pntsd);
		cifs_put_tlink(tlink);
		return -ENOMEM;
	}

	rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
			    &aclflag);

	cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);

	if (tcon->ses->server->ops->set_acl == NULL)
		rc = -EOPNOTSUPP;

	if (!rc) {
		/* Set the security descriptor */
		rc = tcon->ses->server->ops->set_acl(pnntsd, secdesclen, inode,
						     path, aclflag);
		cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
	}
	cifs_put_tlink(tlink);

	kfree(pnntsd);
	kfree(pntsd);
	return rc;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_3060_3 |
crossvul-cpp_data_bad_3060_16 | /* Request key authorisation token key definition.
*
* Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* See Documentation/security/keys-request-key.txt
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "internal.h"
#include <keys/user-type.h>
static int request_key_auth_preparse(struct key_preparsed_payload *);
static void request_key_auth_free_preparse(struct key_preparsed_payload *);
static int request_key_auth_instantiate(struct key *,
					struct key_preparsed_payload *);
static void request_key_auth_describe(const struct key *, struct seq_file *);
static void request_key_auth_revoke(struct key *);
static void request_key_auth_destroy(struct key *);
static long request_key_auth_read(const struct key *, char __user *, size_t);

/*
 * The request-key authorisation key type definition.
 */
struct key_type key_type_request_key_auth = {
	.name		= ".request_key_auth",
	.def_datalen	= sizeof(struct request_key_auth),
	.preparse	= request_key_auth_preparse,
	.free_preparse	= request_key_auth_free_preparse,
	.instantiate	= request_key_auth_instantiate,
	.describe	= request_key_auth_describe,
	.revoke		= request_key_auth_revoke,
	.destroy	= request_key_auth_destroy,
	.read		= request_key_auth_read,
};

/* Nothing to parse: the payload is supplied ready-made to instantiate. */
static int request_key_auth_preparse(struct key_preparsed_payload *prep)
{
	return 0;
}

/* Nothing was allocated by preparse, so nothing to free. */
static void request_key_auth_free_preparse(struct key_preparsed_payload *prep)
{
}

/*
 * Instantiate a request-key authorisation key.
 * The prepared data is the request_key_auth record built by
 * request_key_auth_new(); ownership moves to the key payload.
 */
static int request_key_auth_instantiate(struct key *key,
					struct key_preparsed_payload *prep)
{
	key->payload.data = (struct request_key_auth *)prep->data;
	return 0;
}
/*
 * Describe an authorisation token.
 *
 * The payload may legitimately still be NULL here: ->describe can be
 * invoked (e.g. when listing keys through procfs) while the key is under
 * construction, before request_key_auth_instantiate() has attached the
 * request_key_auth record.  Dereferencing rka unconditionally is then a
 * NULL pointer dereference, so bail out early in that case.
 */
static void request_key_auth_describe(const struct key *key,
				      struct seq_file *m)
{
	struct request_key_auth *rka = key->payload.data;

	if (!rka)
		return;

	seq_puts(m, "key:");
	seq_puts(m, key->description);
	if (key_is_instantiated(key))
		seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
}
/*
 * Read the callout_info data (retrieves the callout information).
 * - the key's semaphore is read-locked
 *
 * Returns the full callout length; copies at most @buflen bytes to the
 * user buffer when one is supplied, or -EFAULT if the copy faults.
 *
 * NOTE(review): like ->describe, this dereferences key->payload.data
 * without a NULL check; presumably ->read is only reachable once the key
 * is instantiated — confirm against the keyctl_read() path.
 */
static long request_key_auth_read(const struct key *key,
				  char __user *buffer, size_t buflen)
{
	struct request_key_auth *rka = key->payload.data;
	size_t datalen;
	long ret;

	datalen = rka->callout_len;
	ret = datalen;	/* always report the full length, even if truncated */

	/* we can return the data as is */
	if (buffer && buflen > 0) {
		if (buflen > datalen)
			buflen = datalen;

		if (copy_to_user(buffer, rka->callout_info, buflen) != 0)
			ret = -EFAULT;
	}

	return ret;
}

/*
 * Handle revocation of an authorisation token key.
 *
 * Called with the key sem write-locked.  Drops the saved credentials;
 * the rest of the payload is torn down later by ->destroy.
 */
static void request_key_auth_revoke(struct key *key)
{
	struct request_key_auth *rka = key->payload.data;

	kenter("{%d}", key->serial);

	if (rka->cred) {
		put_cred(rka->cred);
		rka->cred = NULL;	/* guard against a second put in destroy */
	}
}

/*
 * Destroy an instantiation authorisation token key.
 * Releases every reference the record holds (cred may already have been
 * dropped by ->revoke) and frees the record itself.
 */
static void request_key_auth_destroy(struct key *key)
{
	struct request_key_auth *rka = key->payload.data;

	kenter("{%d}", key->serial);

	if (rka->cred) {
		put_cred(rka->cred);
		rka->cred = NULL;
	}

	key_put(rka->target_key);
	key_put(rka->dest_keyring);
	kfree(rka->callout_info);
	kfree(rka);
}
/*
 * Create an authorisation token for /sbin/request-key or whoever to gain
 * access to the caller's security data.
 *
 * Builds a request_key_auth record (copying @callout_info), allocates a
 * ".request_key_auth" key named after @target's serial number and
 * instantiates it with the record.  If the caller is itself servicing a
 * request (cred->request_key_auth set), the original requester's context
 * (cred, pid) is propagated instead of the caller's own.
 *
 * Returns the new auth key or an ERR_PTR; all partially-built state is
 * released on every error path.
 */
struct key *request_key_auth_new(struct key *target, const void *callout_info,
				 size_t callout_len, struct key *dest_keyring)
{
	struct request_key_auth *rka, *irka;
	const struct cred *cred = current->cred;
	struct key *authkey = NULL;
	char desc[20];
	int ret;

	kenter("%d,", target->serial);

	/* allocate a auth record */
	rka = kmalloc(sizeof(*rka), GFP_KERNEL);
	if (!rka) {
		kleave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}
	rka->callout_info = kmalloc(callout_len, GFP_KERNEL);
	if (!rka->callout_info) {
		kleave(" = -ENOMEM");
		kfree(rka);
		return ERR_PTR(-ENOMEM);
	}

	/* see if the calling process is already servicing the key request of
	 * another process */
	if (cred->request_key_auth) {
		/* it is - use that instantiation context here too */
		down_read(&cred->request_key_auth->sem);

		/* if the auth key has been revoked, then the key we're
		 * servicing is already instantiated */
		if (test_bit(KEY_FLAG_REVOKED, &cred->request_key_auth->flags))
			goto auth_key_revoked;

		irka = cred->request_key_auth->payload.data;
		rka->cred = get_cred(irka->cred);
		rka->pid = irka->pid;

		up_read(&cred->request_key_auth->sem);
	}
	else {
		/* it isn't - use this process as the context */
		rka->cred = get_cred(cred);
		rka->pid = current->pid;
	}

	rka->target_key = key_get(target);
	rka->dest_keyring = key_get(dest_keyring);
	memcpy(rka->callout_info, callout_info, callout_len);
	rka->callout_len = callout_len;

	/* allocate the auth key */
	sprintf(desc, "%x", target->serial);

	authkey = key_alloc(&key_type_request_key_auth, desc,
			    cred->fsuid, cred->fsgid, cred,
			    KEY_POS_VIEW | KEY_POS_READ | KEY_POS_SEARCH |
			    KEY_USR_VIEW, KEY_ALLOC_NOT_IN_QUOTA);
	if (IS_ERR(authkey)) {
		ret = PTR_ERR(authkey);
		goto error_alloc;
	}

	/* construct the auth key */
	ret = key_instantiate_and_link(authkey, rka, 0, NULL, NULL);
	if (ret < 0)
		goto error_inst;

	kleave(" = {%d,%d}", authkey->serial, atomic_read(&authkey->usage));
	return authkey;

auth_key_revoked:
	up_read(&cred->request_key_auth->sem);
	kfree(rka->callout_info);
	kfree(rka);
	kleave("= -EKEYREVOKED");
	return ERR_PTR(-EKEYREVOKED);

error_inst:
	key_revoke(authkey);
	key_put(authkey);
error_alloc:
	key_put(rka->target_key);
	key_put(rka->dest_keyring);
	kfree(rka->callout_info);
	kfree(rka);
	kleave("= %d", ret);
	return ERR_PTR(ret);
}
/*
 * Search the current process's keyrings for the authorisation key for
 * instantiation of a key.
 *
 * Looks up the ".request_key_auth" key whose description is the hex
 * serial of @target_id.  -EAGAIN from the search (nothing found yet) is
 * mapped to -ENOKEY; a revoked auth key yields -EKEYREVOKED.
 * Returns the auth key with a reference held, or an ERR_PTR.
 */
struct key *key_get_instantiation_authkey(key_serial_t target_id)
{
	char description[16];
	struct keyring_search_context ctx = {
		.index_key.type		= &key_type_request_key_auth,
		.index_key.description	= description,
		.cred			= current_cred(),
		.match_data.cmp		= user_match,
		.match_data.raw_data	= description,
		.match_data.lookup_type	= KEYRING_SEARCH_LOOKUP_DIRECT,
	};
	struct key *authkey;
	key_ref_t authkey_ref;

	sprintf(description, "%x", target_id);

	authkey_ref = search_process_keyrings(&ctx);

	if (IS_ERR(authkey_ref)) {
		authkey = ERR_CAST(authkey_ref);
		if (authkey == ERR_PTR(-EAGAIN))
			authkey = ERR_PTR(-ENOKEY);
		goto error;
	}

	authkey = key_ref_to_ptr(authkey_ref);
	if (test_bit(KEY_FLAG_REVOKED, &authkey->flags)) {
		key_put(authkey);
		authkey = ERR_PTR(-EKEYREVOKED);
	}

error:
	return authkey;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3060_16 |
crossvul-cpp_data_good_4271_0 | /*
** $Id: ldebug.c $
** Debug Interface
** See Copyright Notice in lua.h
*/
#define ldebug_c
#define LUA_CORE
#include "lprefix.h"
#include <stdarg.h>
#include <stddef.h>
#include <string.h>
#include "lua.h"
#include "lapi.h"
#include "lcode.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lobject.h"
#include "lopcodes.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ltm.h"
#include "lvm.h"
#define noLuaClosure(f) ((f) == NULL || (f)->c.tt == LUA_VCCL)
/* inverse of 'pcRel' */
#define invpcRel(pc, p) ((p)->code + (pc) + 1)
static const char *funcnamefromcode (lua_State *L, CallInfo *ci,
const char **name);
/* Program counter of the instruction currently executing in Lua frame 'ci'. */
static int currentpc (CallInfo *ci) {
  lua_assert(isLua(ci));
  return pcRel(ci->u.l.savedpc, ci_func(ci)->p);
}


/*
** Get a "base line" to find the line corresponding to an instruction.
** For that, search the array of absolute line info for the largest saved
** instruction smaller or equal to the wanted instruction. A special
** case is when there is no absolute info or the instruction is before
** the first absolute one.
*/
static int getbaseline (const Proto *f, int pc, int *basepc) {
  if (f->sizeabslineinfo == 0 || pc < f->abslineinfo[0].pc) {
    *basepc = -1;  /* start from the beginning */
    return f->linedefined;
  }
  else {
    unsigned int i;
    if (pc >= f->abslineinfo[f->sizeabslineinfo - 1].pc)
      i = f->sizeabslineinfo - 1;  /* instruction is after last saved one */
    else {  /* binary search */
      unsigned int j = f->sizeabslineinfo - 1;  /* pc < anchorlines[j] */
      i = 0;  /* abslineinfo[i] <= pc */
      while (i < j - 1) {
        unsigned int m = (j + i) / 2;
        if (pc >= f->abslineinfo[m].pc)
          i = m;
        else
          j = m;
      }
    }
    *basepc = f->abslineinfo[i].pc;
    return f->abslineinfo[i].line;
  }
}


/*
** Get the line corresponding to instruction 'pc' in function 'f';
** first gets a base line and from there does the increments until
** the desired instruction.  Returns -1 when the chunk carries no
** debug information at all.
*/
int luaG_getfuncline (const Proto *f, int pc) {
  if (f->lineinfo == NULL)  /* no debug information? */
    return -1;
  else {
    int basepc;
    int baseline = getbaseline(f, pc, &basepc);
    while (basepc++ < pc) {  /* walk until given instruction */
      lua_assert(f->lineinfo[basepc] != ABSLINEINFO);
      baseline += f->lineinfo[basepc];  /* correct line */
    }
    return baseline;
  }
}


/* Source line of the instruction currently executing in frame 'ci'. */
static int getcurrentline (CallInfo *ci) {
  return luaG_getfuncline(ci_func(ci)->p, currentpc(ci));
}
/*
** Set 'trap' for all active Lua frames.
** This function can be called during a signal, under "reasonable"
** assumptions. A new 'ci' is completely linked in the list before it
** becomes part of the "active" list, and we assume that pointers are
** atomic; see comment in next function.
** (A compiler doing interprocedural optimizations could, theoretically,
** reorder memory writes in such a way that the list could be
** temporarily broken while inserting a new element. We simply assume it
** has no good reasons to do that.)
*/
static void settraps (CallInfo *ci) {
  for (; ci != NULL; ci = ci->previous)
    if (isLua(ci))
      ci->u.l.trap = 1;
}


/*
** This function can be called during a signal, under "reasonable"
** assumptions.
** Fields 'basehookcount' and 'hookcount' (set by 'resethookcount')
** are for debug only, and it is no problem if they get arbitrary
** values (causes at most one wrong hook call). 'hookmask' is an atomic
** value. We assume that pointers are atomic too (e.g., gcc ensures that
** for all platforms where it runs). Moreover, 'hook' is always checked
** before being called (see 'luaD_hook').
*/
LUA_API void lua_sethook (lua_State *L, lua_Hook func, int mask, int count) {
  if (func == NULL || mask == 0) {  /* turn off hooks? */
    mask = 0;
    func = NULL;
  }
  L->hook = func;
  L->basehookcount = count;
  resethookcount(L);
  L->hookmask = cast_byte(mask);
  if (mask)
    settraps(L->ci);  /* to trace inside 'luaV_execute' */
}


/* Accessors for the current hook configuration. */
LUA_API lua_Hook lua_gethook (lua_State *L) {
  return L->hook;
}


LUA_API int lua_gethookmask (lua_State *L) {
  return L->hookmask;
}


LUA_API int lua_gethookcount (lua_State *L) {
  return L->basehookcount;
}
/*
** Fill 'ar->i_ci' with the activation record 'level' frames below the
** top of the call stack.  Returns 1 on success, 0 when 'level' is
** negative or larger than the stack depth.
*/
LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar) {
  int status;
  CallInfo *ci;
  if (level < 0) return 0;  /* invalid (negative) level */
  lua_lock(L);
  for (ci = L->ci; level > 0 && ci != &L->base_ci; ci = ci->previous)
    level--;
  if (level == 0 && ci != &L->base_ci) {  /* level found? */
    status = 1;
    ar->i_ci = ci;
  }
  else status = 0;  /* no such level */
  lua_unlock(L);
  return status;
}


/* Debug name of upvalue 'uv' in prototype 'p', or "?" if unrecorded. */
static const char *upvalname (const Proto *p, int uv) {
  TString *s = check_exp(uv < p->sizeupvalues, p->upvalues[uv].name);
  if (s == NULL) return "?";
  else return getstr(s);
}


/*
** Locate extra (vararg) argument number -'n' of frame 'ci'.
** Returns its generic name and stores its stack slot in '*pos',
** or NULL when the function is not vararg or 'n' is out of range.
*/
static const char *findvararg (CallInfo *ci, int n, StkId *pos) {
  if (clLvalue(s2v(ci->func))->p->is_vararg) {
    int nextra = ci->u.l.nextraargs;
    if (n >= -nextra) {  /* 'n' is negative */
      *pos = ci->func - nextra - (n + 1);
      return "(vararg)";  /* generic name for any vararg */
    }
  }
  return NULL;  /* no such vararg */
}


/*
** Find local variable 'n' of frame 'ci': negative 'n' selects varargs;
** otherwise tries the debug-info name, then falls back to a generic
** "(temporary)" name for any live slot.  Stores the slot in '*pos'
** (when non-NULL) and returns the name, or NULL if there is no such
** variable.
*/
const char *luaG_findlocal (lua_State *L, CallInfo *ci, int n, StkId *pos) {
  StkId base = ci->func + 1;
  const char *name = NULL;
  if (isLua(ci)) {
    if (n < 0)  /* access to vararg values? */
      return findvararg(ci, n, pos);
    else
      name = luaF_getlocalname(ci_func(ci)->p, n, currentpc(ci));
  }
  if (name == NULL) {  /* no 'standard' name? */
    StkId limit = (ci == L->ci) ? L->top : ci->next->func;
    if (limit - base >= n && n > 0) {  /* is 'n' inside 'ci' stack? */
      /* generic name for any valid slot */
      name = isLua(ci) ? "(temporary)" : "(C temporary)";
    }
    else
      return NULL;  /* no name */
  }
  if (pos)
    *pos = base + (n - 1);
  return name;
}


/*
** API: get name of local 'n' and push its value; with a NULL 'ar',
** reports parameter names of the (non-active) function on the stack top.
*/
LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n) {
  const char *name;
  lua_lock(L);
  if (ar == NULL) {  /* information about non-active function? */
    if (!isLfunction(s2v(L->top - 1)))  /* not a Lua function? */
      name = NULL;
    else  /* consider live variables at function start (parameters) */
      name = luaF_getlocalname(clLvalue(s2v(L->top - 1))->p, n, 0);
  }
  else {  /* active function; get information through 'ar' */
    StkId pos = NULL;  /* to avoid warnings */
    name = luaG_findlocal(L, ar->i_ci, n, &pos);
    if (name) {
      setobjs2s(L, L->top, pos);
      api_incr_top(L);
    }
  }
  lua_unlock(L);
  return name;
}


/* API: pop a value and store it into local 'n' of the given frame. */
LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n) {
  StkId pos = NULL;  /* to avoid warnings */
  const char *name;
  lua_lock(L);
  name = luaG_findlocal(L, ar->i_ci, n, &pos);
  if (name) {
    setobjs2s(L, pos, L->top - 1);
    L->top--;  /* pop value */
  }
  lua_unlock(L);
  return name;
}
/* Fill the 'S' fields of 'ar' (source, line range, kind) for closure 'cl'. */
static void funcinfo (lua_Debug *ar, Closure *cl) {
  if (noLuaClosure(cl)) {
    ar->source = "=[C]";
    ar->srclen = LL("=[C]");
    ar->linedefined = -1;
    ar->lastlinedefined = -1;
    ar->what = "C";
  }
  else {
    const Proto *p = cl->l.p;
    if (p->source) {
      ar->source = getstr(p->source);
      ar->srclen = tsslen(p->source);
    }
    else {
      ar->source = "=?";
      ar->srclen = LL("=?");
    }
    ar->linedefined = p->linedefined;
    ar->lastlinedefined = p->lastlinedefined;
    ar->what = (ar->linedefined == 0) ? "main" : "Lua";
  }
  luaO_chunkid(ar->short_src, ar->source, ar->srclen);
}


/*
** Line of the instruction at 'pc', given the line 'currentline' of the
** previous instruction: either apply the stored delta or, at an
** absolute-info marker, recompute from scratch.
*/
static int nextline (const Proto *p, int currentline, int pc) {
  if (p->lineinfo[pc] != ABSLINEINFO)
    return currentline + p->lineinfo[pc];
  else
    return luaG_getfuncline(p, pc);
}


/*
** Push on the stack a table whose keys are the active source lines of
** closure 'f' (or nil for a C function).
*/
static void collectvalidlines (lua_State *L, Closure *f) {
  if (noLuaClosure(f)) {
    setnilvalue(s2v(L->top));
    api_incr_top(L);
  }
  else {
    int i;
    TValue v;
    const Proto *p = f->l.p;
    int currentline = p->linedefined;
    Table *t = luaH_new(L);  /* new table to store active lines */
    sethvalue2s(L, L->top, t);  /* push it on stack */
    api_incr_top(L);
    setbtvalue(&v);  /* boolean 'true' to be the value of all indices */
    for (i = 0; i < p->sizelineinfo; i++) {  /* for all lines with code */
      currentline = nextline(p, currentline, i);
      luaH_setint(L, t, currentline, &v);  /* table[line] = true */
    }
  }
}


/*
** Try to find a name for the function running in frame 'ci': finalizers
** are reported as the '__gc' metamethod; otherwise the caller's bytecode
** is inspected (only possible for non-tail calls from Lua).
*/
static const char *getfuncname (lua_State *L, CallInfo *ci, const char **name) {
  if (ci == NULL)  /* no 'ci'? */
    return NULL;  /* no info */
  else if (ci->callstatus & CIST_FIN) {  /* is this a finalizer? */
    *name = "__gc";
    return "metamethod";  /* report it as such */
  }
  /* calling function is a known Lua function? */
  else if (!(ci->callstatus & CIST_TAIL) && isLua(ci->previous))
    return funcnamefromcode(L, ci->previous, name);
  else return NULL;  /* no way to find a name */
}
/*
** Fill the fields of 'ar' selected by the option letters in 'what'
** ('S' source, 'l' line, 'u' upvalues/params, 't' tail call, 'n' name,
** 'r' transfer info).  'L' and 'f' are handled by lua_getinfo itself.
** Returns 0 if 'what' contains an unknown option, 1 otherwise.
*/
static int auxgetinfo (lua_State *L, const char *what, lua_Debug *ar,
                       Closure *f, CallInfo *ci) {
  int status = 1;
  for (; *what; what++) {
    switch (*what) {
      case 'S': {
        funcinfo(ar, f);
        break;
      }
      case 'l': {
        ar->currentline = (ci && isLua(ci)) ? getcurrentline(ci) : -1;
        break;
      }
      case 'u': {
        ar->nups = (f == NULL) ? 0 : f->c.nupvalues;
        if (noLuaClosure(f)) {
          ar->isvararg = 1;
          ar->nparams = 0;
        }
        else {
          ar->isvararg = f->l.p->is_vararg;
          ar->nparams = f->l.p->numparams;
        }
        break;
      }
      case 't': {
        ar->istailcall = (ci) ? ci->callstatus & CIST_TAIL : 0;
        break;
      }
      case 'n': {
        ar->namewhat = getfuncname(L, ci, &ar->name);
        if (ar->namewhat == NULL) {  /* not found? */
          ar->namewhat = "";  /* not found */
          ar->name = NULL;
        }
        break;
      }
      case 'r': {
        if (ci == NULL || !(ci->callstatus & CIST_TRAN))
          ar->ftransfer = ar->ntransfer = 0;
        else {
          ar->ftransfer = ci->u2.transferinfo.ftransfer;
          ar->ntransfer = ci->u2.transferinfo.ntransfer;
        }
        break;
      }
      case 'L':
      case 'f':  /* handled by lua_getinfo */
        break;
      default: status = 0;  /* invalid option */
    }
  }
  return status;
}
/*
** API entry for debug information.  A leading '>' in 'what' means the
** subject function is on the stack top (and is popped); otherwise it is
** the frame recorded in 'ar->i_ci'.  'f' pushes the function itself and
** 'L' pushes the active-lines table; everything else is delegated to
** auxgetinfo.  Returns 0 iff 'what' had an invalid option.
*/
LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar) {
  int status;
  Closure *cl;
  CallInfo *ci;
  TValue *func;
  lua_lock(L);
  if (*what == '>') {
    ci = NULL;
    func = s2v(L->top - 1);
    api_check(L, ttisfunction(func), "function expected");
    what++;  /* skip the '>' */
    L->top--;  /* pop function */
  }
  else {
    ci = ar->i_ci;
    func = s2v(ci->func);
    lua_assert(ttisfunction(func));
  }
  cl = ttisclosure(func) ? clvalue(func) : NULL;
  status = auxgetinfo(L, what, ar, cl, ci);
  if (strchr(what, 'f')) {
    setobj2s(L, L->top, func);
    api_incr_top(L);
  }
  if (strchr(what, 'L'))
    collectvalidlines(L, cl);
  lua_unlock(L);
  return status;
}
/*
** {======================================================
** Symbolic Execution
** =======================================================
*/

static const char *getobjname (const Proto *p, int lastpc, int reg,
                               const char **name);


/*
** Find a "name" for the constant 'c' (the string itself when the
** constant is a string, "?" otherwise).
*/
static void kname (const Proto *p, int c, const char **name) {
  TValue *kvalue = &p->k[c];
  *name = (ttisstring(kvalue)) ? svalue(kvalue) : "?";
}


/*
** Find a "name" for the register 'c'; only a name of kind "constant"
** is accepted, anything else degrades to "?".
*/
static void rname (const Proto *p, int pc, int c, const char **name) {
  const char *what = getobjname(p, pc, c, name);  /* search for 'c' */
  if (!(what && *what == 'c'))  /* did not find a constant name? */
    *name = "?";
}


/*
** Find a "name" for a 'C' value in an RK instruction: dispatch on the
** 'k' flag between constant table and register.
*/
static void rkname (const Proto *p, int pc, Instruction i, const char **name) {
  int c = GETARG_C(i);  /* key index */
  if (GETARG_k(i))  /* is 'c' a constant? */
    kname(p, c, name);
  else  /* 'c' is a register */
    rname(p, pc, c, name);
}
/*
** Validate a candidate setter instruction: positions before 'jmptarget'
** lie inside a conditional jump, so they cannot be trusted as the
** definitive writer of the register; report -1 for those, otherwise
** return 'pc' itself.
*/
static int filterpc (int pc, int jmptarget) {
  return (pc < jmptarget) ? -1 : pc;
}
/*
** Try to find last instruction before 'lastpc' that modified register 'reg'.
** Returns the pc of that instruction, or -1 when it cannot be determined
** (e.g. the candidate lies inside a conditional jump — see filterpc).
*/
static int findsetreg (const Proto *p, int lastpc, int reg) {
  int pc;
  int setreg = -1;  /* keep last instruction that changed 'reg' */
  int jmptarget = 0;  /* any code before this address is conditional */
  if (testMMMode(GET_OPCODE(p->code[lastpc])))
    lastpc--;  /* previous instruction was not actually executed */
  for (pc = 0; pc < lastpc; pc++) {
    Instruction i = p->code[pc];
    OpCode op = GET_OPCODE(i);
    int a = GETARG_A(i);
    int change;  /* true if current instruction changed 'reg' */
    switch (op) {
      case OP_LOADNIL: {  /* set registers from 'a' to 'a+b' */
        int b = GETARG_B(i);
        change = (a <= reg && reg <= a + b);
        break;
      }
      case OP_TFORCALL: {  /* affect all regs above its base */
        change = (reg >= a + 2);
        break;
      }
      case OP_CALL:
      case OP_TAILCALL: {  /* affect all registers above base */
        change = (reg >= a);
        break;
      }
      case OP_JMP: {  /* doesn't change registers, but changes 'jmptarget' */
        int b = GETARG_sJ(i);
        int dest = pc + 1 + b;
        /* jump does not skip 'lastpc' and is larger than current one? */
        if (dest <= lastpc && dest > jmptarget)
          jmptarget = dest;  /* update 'jmptarget' */
        change = 0;
        break;
      }
      default:  /* any instruction that sets A */
        change = (testAMode(op) && reg == a);
        break;
    }
    if (change)
      setreg = filterpc(pc, jmptarget);
  }
  return setreg;
}
/*
** Check whether table being indexed by instruction 'i' is the
** environment '_ENV'.  Returns "global" when it is, "field" otherwise.
*/
static const char *gxf (const Proto *p, int pc, Instruction i, int isup) {
  int t = GETARG_B(i);  /* table index */
  const char *name;  /* name of indexed variable */
  if (isup)  /* is an upvalue? */
    name = upvalname(p, t);
  else
    getobjname(p, pc, t, &name);
  return (name && strcmp(name, LUA_ENV) == 0) ? "global" : "field";
}


/*
** Find a name for the object stored in register 'reg' at instruction
** 'lastpc': first the debug local-variable table, then symbolic
** execution of the instruction that last set the register.  Returns the
** kind of name ("local", "field", "upvalue", ...) with '*name' set, or
** NULL when nothing reasonable is found.
*/
static const char *getobjname (const Proto *p, int lastpc, int reg,
                               const char **name) {
  int pc;
  *name = luaF_getlocalname(p, reg + 1, lastpc);
  if (*name)  /* is a local? */
    return "local";
  /* else try symbolic execution */
  pc = findsetreg(p, lastpc, reg);
  if (pc != -1) {  /* could find instruction? */
    Instruction i = p->code[pc];
    OpCode op = GET_OPCODE(i);
    switch (op) {
      case OP_MOVE: {
        int b = GETARG_B(i);  /* move from 'b' to 'a' */
        if (b < GETARG_A(i))
          return getobjname(p, pc, b, name);  /* get name for 'b' */
        break;
      }
      case OP_GETTABUP: {
        int k = GETARG_C(i);  /* key index */
        kname(p, k, name);
        return gxf(p, pc, i, 1);
      }
      case OP_GETTABLE: {
        int k = GETARG_C(i);  /* key index */
        rname(p, pc, k, name);
        return gxf(p, pc, i, 0);
      }
      case OP_GETI: {
        *name = "integer index";
        return "field";
      }
      case OP_GETFIELD: {
        int k = GETARG_C(i);  /* key index */
        kname(p, k, name);
        return gxf(p, pc, i, 0);
      }
      case OP_GETUPVAL: {
        *name = upvalname(p, GETARG_B(i));
        return "upvalue";
      }
      case OP_LOADK:
      case OP_LOADKX: {
        int b = (op == OP_LOADK) ? GETARG_Bx(i)
                                 : GETARG_Ax(p->code[pc + 1]);
        if (ttisstring(&p->k[b])) {
          *name = svalue(&p->k[b]);
          return "constant";
        }
        break;
      }
      case OP_SELF: {
        rkname(p, pc, i, name);
        return "method";
      }
      default: break;  /* go through to return NULL */
    }
  }
  return NULL;  /* could not find reasonable name */
}


/*
** Try to find a name for a function based on the code that called it.
** (Only works when function was called by a Lua function.)
** Returns what the name is (e.g., "for iterator", "method",
** "metamethod") and sets '*name' to point to the name.
*/
static const char *funcnamefromcode (lua_State *L, CallInfo *ci,
                                     const char **name) {
  TMS tm = (TMS)0;  /* (initial value avoids warnings) */
  const Proto *p = ci_func(ci)->p;  /* calling function */
  int pc = currentpc(ci);  /* calling instruction index */
  Instruction i = p->code[pc];  /* calling instruction */
  if (ci->callstatus & CIST_HOOKED) {  /* was it called inside a hook? */
    *name = "?";
    return "hook";
  }
  switch (GET_OPCODE(i)) {
    case OP_CALL:
    case OP_TAILCALL:
      return getobjname(p, pc, GETARG_A(i), name);  /* get function name */
    case OP_TFORCALL: {  /* for iterator */
      *name = "for iterator";
      return "for iterator";
    }
    /* other instructions can do calls through metamethods */
    case OP_SELF: case OP_GETTABUP: case OP_GETTABLE:
    case OP_GETI: case OP_GETFIELD:
      tm = TM_INDEX;
      break;
    case OP_SETTABUP: case OP_SETTABLE: case OP_SETI: case OP_SETFIELD:
      tm = TM_NEWINDEX;
      break;
    case OP_MMBIN: case OP_MMBINI: case OP_MMBINK: {
      tm = cast(TMS, GETARG_C(i));
      break;
    }
    case OP_UNM: tm = TM_UNM; break;
    case OP_BNOT: tm = TM_BNOT; break;
    case OP_LEN: tm = TM_LEN; break;
    case OP_CONCAT: tm = TM_CONCAT; break;
    case OP_EQ: tm = TM_EQ; break;
    case OP_LT: case OP_LE: case OP_LTI: case OP_LEI:
      *name = "order";  /* '<=' can call '__lt', etc. */
      return "metamethod";
    case OP_CLOSE: case OP_RETURN:
      *name = "close";
      return "metamethod";
    default:
      return NULL;  /* cannot find a reasonable name */
  }
  *name = getstr(G(L)->tmname[tm]) + 2;
  return "metamethod";
}
/* }====================================================== */


/*
** The subtraction of two potentially unrelated pointers is
** not ISO C, but it should not crash a program; the subsequent
** checks are ISO C and ensure a correct result.
*/
static int isinstack (CallInfo *ci, const TValue *o) {
  StkId base = ci->func + 1;
  ptrdiff_t i = cast(StkId, o) - base;
  return (0 <= i && i < (ci->top - base) && s2v(base + i) == o);
}


/*
** Checks whether value 'o' came from an upvalue. (That can only happen
** with instructions OP_GETTABUP/OP_SETTABUP, which operate directly on
** upvalues.)
*/
static const char *getupvalname (CallInfo *ci, const TValue *o,
                                 const char **name) {
  LClosure *c = ci_func(ci);
  int i;
  for (i = 0; i < c->nupvalues; i++) {
    if (c->upvals[i]->v == o) {
      *name = upvalname(c->p, i);
      return "upvalue";
    }
  }
  return NULL;
}


/*
** Build an extra " (kind 'name')" description for value 'o' to append to
** an error message; empty string when no name can be found.
*/
static const char *varinfo (lua_State *L, const TValue *o) {
  const char *name = NULL;  /* to avoid warnings */
  CallInfo *ci = L->ci;
  const char *kind = NULL;
  if (isLua(ci)) {
    kind = getupvalname(ci, o, &name);  /* check whether 'o' is an upvalue */
    if (!kind && isinstack(ci, o))  /* no? try a register */
      kind = getobjname(ci_func(ci)->p, currentpc(ci),
                        cast_int(cast(StkId, o) - (ci->func + 1)), &name);
  }
  return (kind) ? luaO_pushfstring(L, " (%s '%s')", kind, name) : "";
}


/* Raise "attempt to <op> a <type> value" for value 'o'. */
l_noret luaG_typeerror (lua_State *L, const TValue *o, const char *op) {
  const char *t = luaT_objtypename(L, o);
  luaG_runerror(L, "attempt to %s a %s value%s", op, t, varinfo(L, o));
}


/* Raise an error for a bad 'for' loop control value. */
l_noret luaG_forerror (lua_State *L, const TValue *o, const char *what) {
  luaG_runerror(L, "bad 'for' %s (number expected, got %s)",
                what, luaT_objtypename(L, o));
}


/* Raise a concatenation type error, blaming the non-coercible operand. */
l_noret luaG_concaterror (lua_State *L, const TValue *p1, const TValue *p2) {
  if (ttisstring(p1) || cvt2str(p1)) p1 = p2;
  luaG_typeerror(L, p1, "concatenate");
}


/* Raise an arithmetic-style type error, blaming the wrong operand. */
l_noret luaG_opinterror (lua_State *L, const TValue *p1,
                         const TValue *p2, const char *msg) {
  if (!ttisnumber(p1))  /* first operand is wrong? */
    p2 = p1;  /* now second is wrong */
  luaG_typeerror(L, p2, msg);
}


/*
** Error when both values are convertible to numbers, but not to integers
*/
l_noret luaG_tointerror (lua_State *L, const TValue *p1, const TValue *p2) {
  lua_Integer temp;
  if (!tointegerns(p1, &temp))
    p2 = p1;
  luaG_runerror(L, "number%s has no integer representation", varinfo(L, p2));
}


/* Raise an order-comparison error between two incomparable values. */
l_noret luaG_ordererror (lua_State *L, const TValue *p1, const TValue *p2) {
  const char *t1 = luaT_objtypename(L, p1);
  const char *t2 = luaT_objtypename(L, p2);
  if (strcmp(t1, t2) == 0)
    luaG_runerror(L, "attempt to compare two %s values", t1);
  else
    luaG_runerror(L, "attempt to compare %s with %s", t1, t2);
}


/* add src:line information to 'msg' */
const char *luaG_addinfo (lua_State *L, const char *msg, TString *src,
                          int line) {
  char buff[LUA_IDSIZE];
  if (src)
    luaO_chunkid(buff, getstr(src), tsslen(src));
  else {  /* no source available; use "?" instead */
    buff[0] = '?'; buff[1] = '\0';
  }
  return luaO_pushfstring(L, "%s:%d: %s", buff, line, msg);
}


/*
** Raise a runtime error: run the message through the error handler (if
** any) and throw LUA_ERRRUN.  Does not return.
*/
l_noret luaG_errormsg (lua_State *L) {
  if (L->errfunc != 0) {  /* is there an error handling function? */
    StkId errfunc = restorestack(L, L->errfunc);
    lua_assert(ttisfunction(s2v(errfunc)));
    setobjs2s(L, L->top, L->top - 1);  /* move argument */
    setobjs2s(L, L->top - 1, errfunc);  /* push function */
    L->top++;  /* assume EXTRA_STACK */
    luaD_callnoyield(L, L->top - 2, 1);  /* call it */
  }
  luaD_throw(L, LUA_ERRRUN);
}


/*
** Format and raise a runtime error, prefixed with source:line when the
** current frame is a Lua function.  Does not return.
*/
l_noret luaG_runerror (lua_State *L, const char *fmt, ...) {
  CallInfo *ci = L->ci;
  const char *msg;
  va_list argp;
  luaC_checkGC(L);  /* error message uses memory */
  va_start(argp, fmt);
  msg = luaO_pushvfstring(L, fmt, argp);  /* format message */
  va_end(argp);
  if (isLua(ci))  /* if Lua function, add source:line information */
    luaG_addinfo(L, msg, ci_func(ci)->p->source, getcurrentline(ci));
  luaG_errormsg(L);
}
/*
** Check whether new instruction 'newpc' is in a different line from
** previous instruction 'oldpc'.
*/
static int changedline (const Proto *p, int oldpc, int newpc) {
  if (p->lineinfo == NULL)  /* no debug information? */
    return 0;
  while (oldpc++ < newpc) {
    if (p->lineinfo[oldpc] != 0)
      return (luaG_getfuncline(p, oldpc - 1) != luaG_getfuncline(p, newpc));
  }
  return 0;  /* no line changes between positions */
}


/*
** Traces the execution of a Lua function. Called before the execution
** of each opcode, when debug is on. 'L->oldpc' stores the last
** instruction traced, to detect line changes. When entering a new
** function, 'npci' will be zero and will test as a new line without
** the need for 'oldpc'; so, 'oldpc' does not need to be initialized
** before. Some exceptional conditions may return to a function without
** updating 'oldpc'. In that case, 'oldpc' may be invalid; if so, it is
** reset to zero. (A wrong but valid 'oldpc' at most causes an extra
** call to a line hook.)
** Returns 0 to turn the 'trap' flag off, 1 to keep it on.
*/
int luaG_traceexec (lua_State *L, const Instruction *pc) {
  CallInfo *ci = L->ci;
  lu_byte mask = L->hookmask;
  const Proto *p = ci_func(ci)->p;
  int counthook;
  /* 'L->oldpc' may be invalid; reset it in this case */
  int oldpc = (L->oldpc < p->sizecode) ? L->oldpc : 0;
  if (!(mask & (LUA_MASKLINE | LUA_MASKCOUNT))) {  /* no hooks? */
    ci->u.l.trap = 0;  /* don't need to stop again */
    return 0;  /* turn off 'trap' */
  }
  pc++;  /* reference is always next instruction */
  ci->u.l.savedpc = pc;  /* save 'pc' */
  counthook = (--L->hookcount == 0 && (mask & LUA_MASKCOUNT));
  if (counthook)
    resethookcount(L);  /* reset count */
  else if (!(mask & LUA_MASKLINE))
    return 1;  /* no line hook and count != 0; nothing to be done now */
  if (ci->callstatus & CIST_HOOKYIELD) {  /* called hook last time? */
    ci->callstatus &= ~CIST_HOOKYIELD;  /* erase mark */
    return 1;  /* do not call hook again (VM yielded, so it did not move) */
  }
  if (!isIT(*(ci->u.l.savedpc - 1)))
    L->top = ci->top;  /* prepare top */
  if (counthook)
    luaD_hook(L, LUA_HOOKCOUNT, -1, 0, 0);  /* call count hook */
  if (mask & LUA_MASKLINE) {
    int npci = pcRel(pc, p);
    if (npci == 0 ||  /* call linehook when enter a new function, */
        pc <= invpcRel(oldpc, p) ||  /* when jump back (loop), or when */
        changedline(p, oldpc, npci)) {  /* enter new line */
      int newline = luaG_getfuncline(p, npci);
      luaD_hook(L, LUA_HOOKLINE, newline, 0, 0);  /* call line hook */
    }
    L->oldpc = npci;  /* 'pc' of last call to line hook */
  }
  if (L->status == LUA_YIELD) {  /* did hook yield? */
    if (counthook)
      L->hookcount = 1;  /* undo decrement to zero */
    ci->u.l.savedpc--;  /* undo increment (resume will increment it again) */
    ci->callstatus |= CIST_HOOKYIELD;  /* mark that it yielded */
    luaD_throw(L, LUA_YIELD);
  }
  return 1;  /* keep 'trap' on */
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_4271_0 |
crossvul-cpp_data_good_4816_0 | /*
* Software multibuffer async crypto daemon.
*
* Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
*
* Adapted from crypto daemon.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
unsigned int tail);
struct mcryptd_flush_list {
struct list_head list;
struct mutex lock;
};
static struct mcryptd_flush_list __percpu *mcryptd_flist;
struct hashd_instance_ctx {
struct crypto_ahash_spawn spawn;
struct mcryptd_queue *queue;
};
static void mcryptd_queue_worker(struct work_struct *work);
/*
 * Arm the flusher for a per-CPU algorithm state: if it is not already
 * engaged, link @cstate onto the current CPU's flush list and schedule
 * the delayed flush work @delay jiffies from now.
 * The per-CPU list is protected by its own mutex.
 */
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
    struct mcryptd_flush_list *flist;
    if (!cstate->flusher_engaged) {
        /* put the flusher on the flush list */
        flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
        mutex_lock(&flist->lock);
        list_add_tail(&cstate->flush_list, &flist->list);
        cstate->flusher_engaged = true;
        cstate->next_flush = jiffies + delay;
        /* run the flush on the same CPU that owns this state */
        queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
                    &cstate->flush, delay);
        mutex_unlock(&flist->lock);
    }
}
EXPORT_SYMBOL(mcryptd_arm_flusher);
/*
 * Allocate and initialize the per-CPU request queues for @queue.
 * Each CPU gets a crypto_queue bounded by @max_cpu_qlen and a work item
 * that drains it (mcryptd_queue_worker).
 * Returns 0 on success, -ENOMEM if the percpu allocation fails.
 */
static int mcryptd_init_queue(struct mcryptd_queue *queue,
                unsigned int max_cpu_qlen)
{
    int cpu;
    struct mcryptd_cpu_queue *cpu_queue;
    queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
    pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
    if (!queue->cpu_queue)
        return -ENOMEM;
    for_each_possible_cpu(cpu) {
        cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
        pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
        crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
        INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
    }
    return 0;
}
/*
 * Tear down the per-CPU queues allocated by mcryptd_init_queue().
 * All queues must already be drained; a non-empty queue here is a bug
 * (requests would be leaked), hence the BUG_ON.
 */
static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
    int cpu;
    struct mcryptd_cpu_queue *cpu_queue;
    for_each_possible_cpu(cpu) {
        cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
        BUG_ON(cpu_queue->queue.qlen);
    }
    free_percpu(queue->cpu_queue);
}
/*
 * Queue @request on the current CPU's crypto queue and kick the worker.
 * The issuing CPU is recorded in rctx->tag.cpu so completion can be
 * tracked per CPU.  get_cpu()/put_cpu() pin us to one CPU so the queue
 * we enqueue on is the one whose work item we schedule.
 * Returns the crypto_enqueue_request() status (e.g. -EBUSY on backlog).
 */
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
                 struct crypto_async_request *request,
                 struct mcryptd_hash_request_ctx *rctx)
{
    int cpu, err;
    struct mcryptd_cpu_queue *cpu_queue;
    cpu = get_cpu();
    cpu_queue = this_cpu_ptr(queue->cpu_queue);
    rctx->tag.cpu = cpu;
    err = crypto_enqueue_request(&cpu_queue->queue, request);
    pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
         cpu, cpu_queue, request);
    queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
    put_cpu();
    return err;
}
/*
* Try to opportunisticlly flush the partially completed jobs if
* crypto daemon is the only task running.
*/
static void mcryptd_opportunistic_flush(void)
{
    struct mcryptd_flush_list *flist;
    struct mcryptd_alg_cstate *cstate;
    flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
    /*
     * Keep flushing engaged states while this CPU has nothing else to
     * run; stop as soon as another task is runnable or the list is
     * empty.  Each iteration pops one entry under the list mutex and
     * invokes its flusher outside the lock.
     */
    while (single_task_running()) {
        mutex_lock(&flist->lock);
        cstate = list_first_entry_or_null(&flist->list,
                struct mcryptd_alg_cstate, flush_list);
        if (!cstate || !cstate->flusher_engaged) {
            mutex_unlock(&flist->lock);
            return;
        }
        list_del(&cstate->flush_list);
        cstate->flusher_engaged = false;
        mutex_unlock(&flist->lock);
        /* flusher callback is run without the list lock held */
        cstate->alg_state->flusher(cstate);
    }
}
/*
* Called in workqueue context, do one real cryption work (via
* req->complete) and reschedule itself if there are more work to
* do.
*/
/*
 * Work handler that drains one CPU's request queue.  Processes up to
 * MCRYPTD_BATCH requests per invocation (more if this CPU is otherwise
 * idle), then reschedules itself if work remains, so a single queue
 * cannot monopolize the workqueue.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
    struct mcryptd_cpu_queue *cpu_queue;
    struct crypto_async_request *req, *backlog;
    int i;
    /*
     * Need to loop through more than once for multi-buffer to
     * be effective.
     */
    cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
    i = 0;
    while (i < MCRYPTD_BATCH || single_task_running()) {
        /*
         * preempt_disable/enable is used to prevent
         * being preempted by mcryptd_enqueue_request()
         */
        local_bh_disable();
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();
        local_bh_enable();
        if (!req) {
            /* queue empty: try to flush partially completed jobs */
            mcryptd_opportunistic_flush();
            return;
        }
        if (backlog)
            backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);
        if (!cpu_queue->queue.qlen)
            return;
        ++i;
    }
    /* batch limit hit with work pending: requeue ourselves */
    if (cpu_queue->queue.qlen)
        queue_work(kcrypto_wq, &cpu_queue->work);
}
/*
 * Delayed-work handler that flushes one per-CPU algorithm state.
 * Removes @alg_cpu_state from its CPU's flush list (if still engaged)
 * and calls the algorithm's flusher callback.
 */
void mcryptd_flusher(struct work_struct *__work)
{
    struct mcryptd_alg_cstate *alg_cpu_state;
    struct mcryptd_alg_state *alg_state;
    struct mcryptd_flush_list *flist;
    int cpu;
    cpu = smp_processor_id();
    alg_cpu_state = container_of(to_delayed_work(__work),
                struct mcryptd_alg_cstate, flush);
    alg_state = alg_cpu_state->alg_state;
    /* diagnostic only: work should run on the CPU that owns the state */
    if (alg_cpu_state->cpu != cpu)
        pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
            cpu, alg_cpu_state->cpu);
    if (alg_cpu_state->flusher_engaged) {
        flist = per_cpu_ptr(mcryptd_flist, cpu);
        mutex_lock(&flist->lock);
        list_del(&alg_cpu_state->flush_list);
        alg_cpu_state->flusher_engaged = false;
        mutex_unlock(&flist->lock);
        alg_state->flusher(alg_cpu_state);
    }
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);
/* Map a transform back to the mcryptd_queue stored in its instance ctx. */
static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
    struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
    struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
    return ictx->queue;
}
/*
 * Allocate a template instance wrapping @alg, with @head bytes before
 * and @tail bytes after the crypto_instance, and fill in the generic
 * cra_* fields.  The driver name becomes "mcryptd(<driver>)".
 * Returns the raw allocation (caller derives the instance from it) or
 * an ERR_PTR on failure.
 */
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                    unsigned int tail)
{
    char *p;
    struct crypto_instance *inst;
    int err;
    p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
    if (!p)
        return ERR_PTR(-ENOMEM);
    /* the crypto_instance lives @head bytes into the allocation */
    inst = (void *)(p + head);
    err = -ENAMETOOLONG;
    if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
             "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
        goto out_free_inst;
    memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
    /* prefer the wrapped variant over the underlying algorithm */
    inst->alg.cra_priority = alg->cra_priority + 50;
    inst->alg.cra_blocksize = alg->cra_blocksize;
    inst->alg.cra_alignmask = alg->cra_alignmask;
out:
    return p;
out_free_inst:
    kfree(p);
    p = ERR_PTR(err);
    goto out;
}
/*
 * Propagate the CRYPTO_ALG_INTERNAL bit from the template attributes
 * into *type/*mask and report whether the caller requested an internal
 * algorithm.  Returns false on malformed attributes or when the
 * internal bit is not both set and masked.
 */
static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
                      u32 *mask)
{
    struct crypto_attr_type *algt = crypto_get_attr_type(tb);

    if (IS_ERR(algt))
        return false;

    *type |= algt->type & CRYPTO_ALG_INTERNAL;
    *mask |= algt->mask & CRYPTO_ALG_INTERNAL;

    return (*type & *mask & CRYPTO_ALG_INTERNAL) != 0;
}
/*
 * cra_init for mcryptd hash transforms: instantiate the wrapped (child)
 * ahash from the spawn and size our request context to hold the child's
 * request context as well.
 */
static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
    struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
    struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
    struct crypto_ahash_spawn *spawn = &ictx->spawn;
    struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
    struct crypto_ahash *hash;
    hash = crypto_spawn_ahash(spawn);
    if (IS_ERR(hash))
        return PTR_ERR(hash);
    ctx->child = hash;
    crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                 sizeof(struct mcryptd_hash_request_ctx) +
                 crypto_ahash_reqsize(hash));
    return 0;
}
/* cra_exit: release the child ahash acquired in mcryptd_hash_init_tfm(). */
static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
    struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
    crypto_free_ahash(ctx->child);
}
/*
 * Forward setkey to the child ahash, mirroring the parent's request
 * flags down and the child's result flags back up.
 */
static int mcryptd_hash_setkey(struct crypto_ahash *parent,
                   const u8 *key, unsigned int keylen)
{
    struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
    struct crypto_ahash *child = ctx->child;
    int err;
    crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
    crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
                   CRYPTO_TFM_REQ_MASK);
    err = crypto_ahash_setkey(child, key, keylen);
    crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
                   CRYPTO_TFM_RES_MASK);
    return err;
}
/*
 * Common enqueue path for all hash operations: stash the caller's
 * completion in the request context, substitute @complete (the
 * operation-specific worker), and queue the request on this CPU.
 */
static int mcryptd_hash_enqueue(struct ahash_request *req,
                crypto_completion_t complete)
{
    int ret;
    struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
    struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
    struct mcryptd_queue *queue =
        mcryptd_get_queue(crypto_ahash_tfm(tfm));
    rctx->complete = req->base.complete;
    req->base.complete = complete;
    ret = mcryptd_enqueue_request(queue, &req->base, rctx);
    return ret;
}
/*
 * Deferred "init" worker: set up the child request inside our request
 * context and run crypto_ahash_init() on the child, then invoke the
 * caller's original completion with the result.
 */
static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
    struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
    struct crypto_ahash *child = ctx->child;
    struct ahash_request *req = ahash_request_cast(req_async);
    struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
    struct ahash_request *desc = &rctx->areq;
    if (unlikely(err == -EINPROGRESS))
        goto out;
    ahash_request_set_tfm(desc, child);
    ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
                        rctx->complete, req_async);
    rctx->out = req->result;
    err = crypto_ahash_init(desc);
out:
    /* completion callbacks expect softirqs off */
    local_bh_disable();
    rctx->complete(&req->base, err);
    local_bh_enable();
}
/* Public .init entry point: defer to mcryptd_hash_init via the queue. */
static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
    return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}
/*
 * Deferred "update" worker.  On success the child completes
 * asynchronously via rctx->complete; on immediate failure the original
 * completion is restored and called here with the error.
 */
static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
    struct ahash_request *req = ahash_request_cast(req_async);
    struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
    if (unlikely(err == -EINPROGRESS))
        goto out;
    rctx->out = req->result;
    err = ahash_mcryptd_update(&rctx->areq);
    if (err) {
        req->base.complete = rctx->complete;
        goto out;
    }
    return;
out:
    local_bh_disable();
    rctx->complete(&req->base, err);
    local_bh_enable();
}
/* Public .update entry point: defer to mcryptd_hash_update via the queue. */
static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
    return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}
/*
 * Deferred "final" worker; error handling mirrors mcryptd_hash_update:
 * only immediate failures complete synchronously here.
 */
static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
    struct ahash_request *req = ahash_request_cast(req_async);
    struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
    if (unlikely(err == -EINPROGRESS))
        goto out;
    rctx->out = req->result;
    err = ahash_mcryptd_final(&rctx->areq);
    if (err) {
        req->base.complete = rctx->complete;
        goto out;
    }
    return;
out:
    local_bh_disable();
    rctx->complete(&req->base, err);
    local_bh_enable();
}
/* Public .final entry point: defer to mcryptd_hash_final via the queue. */
static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
    return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}
/*
 * Deferred "finup" (update+final) worker; same completion contract as
 * mcryptd_hash_update/mcryptd_hash_final.
 */
static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
    struct ahash_request *req = ahash_request_cast(req_async);
    struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
    if (unlikely(err == -EINPROGRESS))
        goto out;
    rctx->out = req->result;
    err = ahash_mcryptd_finup(&rctx->areq);
    if (err) {
        req->base.complete = rctx->complete;
        goto out;
    }
    return;
out:
    local_bh_disable();
    rctx->complete(&req->base, err);
    local_bh_enable();
}
/* Public .finup entry point: defer to mcryptd_hash_finup via the queue. */
static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
    return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}
/*
 * Deferred "digest" worker: prepare the child request (tfm, callback,
 * output buffer) and run the full init+finup sequence on the child.
 * Note: unlike update/final/finup, the original completion is invoked
 * here even when the child reports an error asynchronously.
 */
static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
    struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
    struct crypto_ahash *child = ctx->child;
    struct ahash_request *req = ahash_request_cast(req_async);
    struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
    struct ahash_request *desc = &rctx->areq;
    if (unlikely(err == -EINPROGRESS))
        goto out;
    ahash_request_set_tfm(desc, child);
    ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
                        rctx->complete, req_async);
    rctx->out = req->result;
    err = ahash_mcryptd_digest(desc);
out:
    local_bh_disable();
    rctx->complete(&req->base, err);
    local_bh_enable();
}
/* Public .digest entry point: defer to mcryptd_hash_digest via the queue. */
static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
    return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}
/* Export the child request's partial hash state into @out. */
static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
    struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
    return crypto_ahash_export(&rctx->areq, out);
}
/* Import a previously exported partial hash state into the child request. */
static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
    struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
    return crypto_ahash_import(&rctx->areq, in);
}
/*
 * Instantiate an mcryptd-wrapped ahash from the template attributes.
 * Only proceeds when the underlying algorithm is marked internal (see
 * mcryptd_check_internal); builds the instance, wires our ops, and
 * registers it.  Drops the attr-acquired reference on @alg in all paths.
 * Note the kernel-idiom goto label nested in the failure branch of
 * ahash_register_instance().
 */
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                  struct mcryptd_queue *queue)
{
    struct hashd_instance_ctx *ctx;
    struct ahash_instance *inst;
    struct hash_alg_common *halg;
    struct crypto_alg *alg;
    u32 type = 0;
    u32 mask = 0;
    int err;
    if (!mcryptd_check_internal(tb, &type, &mask))
        return -EINVAL;
    halg = ahash_attr_alg(tb[1], type, mask);
    if (IS_ERR(halg))
        return PTR_ERR(halg);
    alg = &halg->base;
    pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
    inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
                    sizeof(*ctx));
    err = PTR_ERR(inst);
    if (IS_ERR(inst))
        goto out_put_alg;
    ctx = ahash_instance_ctx(inst);
    ctx->queue = queue;
    err = crypto_init_ahash_spawn(&ctx->spawn, halg,
                      ahash_crypto_instance(inst));
    if (err)
        goto out_free_inst;
    /* wrapper is async; keep the internal marking of the wrapped alg */
    type = CRYPTO_ALG_ASYNC;
    if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
        type |= CRYPTO_ALG_INTERNAL;
    inst->alg.halg.base.cra_flags = type;
    inst->alg.halg.digestsize = halg->digestsize;
    inst->alg.halg.statesize = halg->statesize;
    inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
    inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
    inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;
    inst->alg.init = mcryptd_hash_init_enqueue;
    inst->alg.update = mcryptd_hash_update_enqueue;
    inst->alg.final = mcryptd_hash_final_enqueue;
    inst->alg.finup = mcryptd_hash_finup_enqueue;
    inst->alg.export = mcryptd_hash_export;
    inst->alg.import = mcryptd_hash_import;
    inst->alg.setkey = mcryptd_hash_setkey;
    inst->alg.digest = mcryptd_hash_digest_enqueue;
    err = ahash_register_instance(tmpl, inst);
    if (err) {
        crypto_drop_ahash(&ctx->spawn);
out_free_inst:
        kfree(inst);
    }
out_put_alg:
    crypto_mod_put(alg);
    return err;
}
static struct mcryptd_queue mqueue;
/*
 * Template .create hook: dispatch on the requested algorithm type.
 * Only digest (ahash) algorithms are supported; anything else is
 * rejected with -EINVAL.
 */
static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
    struct crypto_attr_type *algt = crypto_get_attr_type(tb);

    if (IS_ERR(algt))
        return PTR_ERR(algt);

    if ((algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) ==
        CRYPTO_ALG_TYPE_DIGEST)
        return mcryptd_create_hash(tmpl, tb, &mqueue);

    return -EINVAL;
}
/*
 * Template .free hook: release the spawn and the instance allocation.
 * The ctx layout depends on the algorithm type, hence the two views of
 * the same instance context.
 */
static void mcryptd_free(struct crypto_instance *inst)
{
    struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
    struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
    switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
    case CRYPTO_ALG_TYPE_AHASH:
        crypto_drop_ahash(&hctx->spawn);
        kfree(ahash_instance(inst));
        return;
    default:
        crypto_drop_spawn(&ctx->spawn);
        kfree(inst);
    }
}
static struct crypto_template mcryptd_tmpl = {
.name = "mcryptd",
.create = mcryptd_create,
.free = mcryptd_free,
.module = THIS_MODULE,
};
/*
 * Allocate a handle to "mcryptd(<alg_name>)".  Rejects names that would
 * overflow CRYPTO_MAX_ALG_NAME and transforms not provided by this
 * module (guards against a same-named algorithm from elsewhere).
 * Returns the mcryptd handle or an ERR_PTR.
 */
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
                    u32 type, u32 mask)
{
    char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
    struct crypto_ahash *tfm;
    if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
        return ERR_PTR(-EINVAL);
    tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
    if (IS_ERR(tfm))
        return ERR_CAST(tfm);
    if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
        crypto_free_ahash(tfm);
        return ERR_PTR(-EINVAL);
    }
    return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
/* Full digest on the child request: init, then update+final in one go. */
int ahash_mcryptd_digest(struct ahash_request *desc)
{
    return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
}
/* Thin pass-through to crypto_ahash_update on the child request. */
int ahash_mcryptd_update(struct ahash_request *desc)
{
    /* alignment is to be done by multi-buffer crypto algorithm if needed */
    return crypto_ahash_update(desc);
}
/* Thin pass-through to crypto_ahash_finup on the child request. */
int ahash_mcryptd_finup(struct ahash_request *desc)
{
    /* alignment is to be done by multi-buffer crypto algorithm if needed */
    return crypto_ahash_finup(desc);
}
/* Thin pass-through to crypto_ahash_final on the child request. */
int ahash_mcryptd_final(struct ahash_request *desc)
{
    /* alignment is to be done by multi-buffer crypto algorithm if needed */
    return crypto_ahash_final(desc);
}
/* Accessor: the wrapped (child) ahash of an mcryptd handle. */
struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
    struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
    return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);
/* Accessor: the child ahash_request embedded in an mcryptd request. */
struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
{
    struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
    return &rctx->areq;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);
/* Release a handle obtained from mcryptd_alloc_ahash(). */
void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
    crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
/*
 * Module init: allocate the per-CPU flush lists, initialize each list
 * head and mutex, set up the request queue, and register the "mcryptd"
 * template.  Returns 0 or a negative errno; partially-built state is
 * torn down on failure.
 */
static int __init mcryptd_init(void)
{
    int err, cpu;
    struct mcryptd_flush_list *flist;

    mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
    /*
     * Fix: alloc_percpu() can fail; the original dereferenced the
     * result unconditionally via per_cpu_ptr(), a NULL-pointer deref.
     */
    if (!mcryptd_flist)
        return -ENOMEM;

    for_each_possible_cpu(cpu) {
        flist = per_cpu_ptr(mcryptd_flist, cpu);
        INIT_LIST_HEAD(&flist->list);
        mutex_init(&flist->lock);
    }

    err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
    if (err) {
        free_percpu(mcryptd_flist);
        return err;
    }

    err = crypto_register_template(&mcryptd_tmpl);
    if (err) {
        mcryptd_fini_queue(&mqueue);
        free_percpu(mcryptd_flist);
    }

    return err;
}
/* Module exit: unwind mcryptd_init() — queue, template, percpu lists. */
static void __exit mcryptd_exit(void)
{
    mcryptd_fini_queue(&mqueue);
    crypto_unregister_template(&mcryptd_tmpl);
    free_percpu(mcryptd_flist);
}
subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_4816_0 |
crossvul-cpp_data_bad_3256_0 | /*
* This contains encryption functions for per-file encryption.
*
* Copyright (C) 2015, Google, Inc.
* Copyright (C) 2015, Motorola Mobility
*
* Written by Michael Halcrow, 2014.
*
* Filename encryption additions
* Uday Savagaonkar, 2014
* Encryption policy handling additions
* Ildar Muslukhov, 2014
* Add fscrypt_pullback_bio_page()
* Jaegeuk Kim, 2015.
*
* This has not yet undergone a rigorous security audit.
*
* The usage of AES-XTS should conform to recommendations in NIST
* Special Publication 800-38E and IEEE P1619/D16.
*/
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;
module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
"Number of crypto contexts to preallocate");
static mempool_t *fscrypt_bounce_page_pool = NULL;
static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);
struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);
static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;
/**
* fscrypt_release_ctx() - Releases an encryption context
* @ctx: The encryption context to release.
*
* If the encryption context was allocated from the pre-allocated pool, returns
* it to that pool. Else, frees it.
*
* If there's a bounce page in the context, this frees that.
*/
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
    unsigned long flags;
    /* return any bounce page to its mempool before recycling the ctx */
    if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
        mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
        ctx->w.bounce_page = NULL;
    }
    ctx->w.control_page = NULL;
    if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
        /* ctx came from the slab, not the preallocated pool */
        kmem_cache_free(fscrypt_ctx_cachep, ctx);
    } else {
        spin_lock_irqsave(&fscrypt_ctx_lock, flags);
        list_add(&ctx->free_list, &fscrypt_free_ctxs);
        spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
    }
}
EXPORT_SYMBOL(fscrypt_release_ctx);
/**
* fscrypt_get_ctx() - Gets an encryption context
* @inode: The inode for which we are doing the crypto
* @gfp_flags: The gfp flag for memory allocation
*
* Allocates and initializes an encryption context.
*
* Return: An allocated and initialized encryption context on success; error
* value or NULL otherwise.
*/
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
    struct fscrypt_ctx *ctx = NULL;
    struct fscrypt_info *ci = inode->i_crypt_info;
    unsigned long flags;
    /* no key set up for this inode yet */
    if (ci == NULL)
        return ERR_PTR(-ENOKEY);
    /*
     * We first try getting the ctx from a free list because in
     * the common case the ctx will have an allocated and
     * initialized crypto tfm, so it's probably a worthwhile
     * optimization. For the bounce page, we first try getting it
     * from the kernel allocator because that's just about as fast
     * as getting it from a list and because a cache of free pages
     * should generally be a "last resort" option for a filesystem
     * to be able to do its job.
     */
    spin_lock_irqsave(&fscrypt_ctx_lock, flags);
    ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
                    struct fscrypt_ctx, free_list);
    if (ctx)
        list_del(&ctx->free_list);
    spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
    if (!ctx) {
        /* pool empty: fall back to the slab; mark for kfree on release */
        ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
        if (!ctx)
            return ERR_PTR(-ENOMEM);
        ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
    } else {
        ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
    }
    ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
    return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
/**
* page_crypt_complete() - completion callback for page crypto
* @req: The asynchronous cipher request context
* @res: The result of the cipher operation
*/
static void page_crypt_complete(struct crypto_async_request *req, int res)
{
    struct fscrypt_completion_result *ecr = req->data;
    /* -EINPROGRESS means the request was only accepted; wait for real result */
    if (res == -EINPROGRESS)
        return;
    ecr->res = res;
    complete(&ecr->completion);
}
/*
 * Encrypt or decrypt @len bytes at @offs, from @src_page into
 * @dest_page (the two may be the same page for in-place operation).
 * The XTS tweak is derived from the logical block number @lblk_num.
 * Synchronous: blocks until an async cipher completes.
 * Returns 0 or a negative errno.
 */
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
            u64 lblk_num, struct page *src_page,
            struct page *dest_page, unsigned int len,
            unsigned int offs, gfp_t gfp_flags)
{
    /* tweak = little-endian block number, zero-padded to FS_XTS_TWEAK_SIZE */
    struct {
        __le64 index;
        u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
    } xts_tweak;
    struct skcipher_request *req = NULL;
    DECLARE_FS_COMPLETION_RESULT(ecr);
    struct scatterlist dst, src;
    struct fscrypt_info *ci = inode->i_crypt_info;
    struct crypto_skcipher *tfm = ci->ci_ctfm;
    int res = 0;
    BUG_ON(len == 0);
    req = skcipher_request_alloc(tfm, gfp_flags);
    if (!req) {
        printk_ratelimited(KERN_ERR
                "%s: crypto_request_alloc() failed\n",
                __func__);
        return -ENOMEM;
    }
    skcipher_request_set_callback(
        req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
        page_crypt_complete, &ecr);
    BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
    xts_tweak.index = cpu_to_le64(lblk_num);
    memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));
    sg_init_table(&dst, 1);
    sg_set_page(&dst, dest_page, len, offs);
    sg_init_table(&src, 1);
    sg_set_page(&src, src_page, len, offs);
    skcipher_request_set_crypt(req, &src, &dst, len, &xts_tweak);
    if (rw == FS_DECRYPT)
        res = crypto_skcipher_decrypt(req);
    else
        res = crypto_skcipher_encrypt(req);
    if (res == -EINPROGRESS || res == -EBUSY) {
        /* async path: block until page_crypt_complete fires */
        BUG_ON(req->base.data != &ecr);
        wait_for_completion(&ecr.completion);
        res = ecr.res;
    }
    skcipher_request_free(req);
    if (res) {
        printk_ratelimited(KERN_ERR
            "%s: crypto_skcipher_encrypt() returned %d\n",
            __func__, res);
        return res;
    }
    return 0;
}
/*
 * Grab a bounce page from the mempool, attach it to @ctx and flag the
 * ctx so fscrypt_release_ctx() knows to return it to the pool.
 */
struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
                       gfp_t gfp_flags)
{
    ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
    if (ctx->w.bounce_page == NULL)
        return ERR_PTR(-ENOMEM);
    ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
    return ctx->w.bounce_page;
}
/**
* fscypt_encrypt_page() - Encrypts a page
* @inode: The inode for which the encryption should take place
* @page: The page to encrypt. Must be locked for bounce-page
* encryption.
* @len: Length of data to encrypt in @page and encrypted
* data in returned page.
* @offs: Offset of data within @page and returned
* page holding encrypted data.
* @lblk_num: Logical block number. This must be unique for multiple
* calls with same inode, except when overwriting
* previously written data.
* @gfp_flags: The gfp flag for memory allocation
*
* Encrypts @page using the ctx encryption context. Performs encryption
* either in-place or into a newly allocated bounce page.
* Called on the page write path.
*
* Bounce page allocation is the default.
* In this case, the contents of @page are encrypted and stored in an
* allocated bounce page. @page has to be locked and the caller must call
* fscrypt_restore_control_page() on the returned ciphertext page to
* release the bounce buffer and the encryption context.
*
* In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
* fscrypt_operations. Here, the input-page is returned with its content
* encrypted.
*
* Return: A page with the encrypted content on success. Else, an
* error value or NULL.
*/
struct page *fscrypt_encrypt_page(const struct inode *inode,
                struct page *page,
                unsigned int len,
                unsigned int offs,
                u64 lblk_num, gfp_t gfp_flags)
{
    struct fscrypt_ctx *ctx;
    struct page *ciphertext_page = page;
    int err;
    /* only whole cipher blocks can be encrypted */
    BUG_ON(len % FS_CRYPTO_BLOCK_SIZE != 0);
    if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
        /* with inplace-encryption we just encrypt the page */
        err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
                         ciphertext_page, len, offs,
                         gfp_flags);
        if (err)
            return ERR_PTR(err);
        return ciphertext_page;
    }
    BUG_ON(!PageLocked(page));
    ctx = fscrypt_get_ctx(inode, gfp_flags);
    if (IS_ERR(ctx))
        return (struct page *)ctx;
    /* The encryption operation will require a bounce page. */
    ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
    if (IS_ERR(ciphertext_page))
        goto errout;
    ctx->w.control_page = page;
    err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
                     page, ciphertext_page, len, offs,
                     gfp_flags);
    if (err) {
        ciphertext_page = ERR_PTR(err);
        goto errout;
    }
    /* stash the ctx on the bounce page for fscrypt_restore_control_page() */
    SetPagePrivate(ciphertext_page);
    set_page_private(ciphertext_page, (unsigned long)ctx);
    lock_page(ciphertext_page);
    return ciphertext_page;
errout:
    fscrypt_release_ctx(ctx);
    return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
/**
* fscrypt_decrypt_page() - Decrypts a page in-place
* @inode: The corresponding inode for the page to decrypt.
* @page: The page to decrypt. Must be locked in case
* it is a writeback page (FS_CFLG_OWN_PAGES unset).
* @len: Number of bytes in @page to be decrypted.
* @offs: Start of data in @page.
* @lblk_num: Logical block number.
*
* Decrypts page in-place using the ctx encryption context.
*
* Called from the read completion callback.
*
* Return: Zero on success, non-zero otherwise.
*/
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
            unsigned int len, unsigned int offs, u64 lblk_num)
{
    /* writeback pages must be locked unless the fs manages its own pages */
    if (!(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES))
        BUG_ON(!PageLocked(page));
    return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
                    len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
/*
* Validate dentries for encrypted directories to make sure we aren't
* potentially caching stale data after a key has been added or
* removed.
*/
/*
 * Dentry revalidation for encrypted directories: fail validation when
 * the key availability has changed since the dentry was cached, so the
 * filesystem's lookup path can redo the name (de)coding.
 *
 * NOTE(review): ci->ci_keyring_key->flags is read here without taking a
 * reference on the key, so it can race with concurrent key revocation/
 * invalidation; upstream fscrypt later removed this revocation-detection
 * entirely as unreliable — confirm against the current kernel.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
    struct dentry *dir;
    struct fscrypt_info *ci;
    int dir_has_key, cached_with_key;
    /* cannot take references / sleep in RCU-walk mode */
    if (flags & LOOKUP_RCU)
        return -ECHILD;
    dir = dget_parent(dentry);
    if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
        dput(dir);
        return 0;
    }
    ci = d_inode(dir)->i_crypt_info;
    /* treat a revoked/invalidated/dead key the same as no key */
    if (ci && ci->ci_keyring_key &&
        (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
                      (1 << KEY_FLAG_REVOKED) |
                      (1 << KEY_FLAG_DEAD))))
        ci = NULL;
    /* this should eventually be an flag in d_flags */
    spin_lock(&dentry->d_lock);
    cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
    spin_unlock(&dentry->d_lock);
    dir_has_key = (ci != NULL);
    dput(dir);
    /*
     * If the dentry was cached without the key, and it is a
     * negative dentry, it might be a valid name. We can't check
     * if the key has since been made available due to locking
     * reasons, so we fail the validation so ext4_lookup() can do
     * this check.
     *
     * We also fail the validation if the dentry was created with
     * the key present, but we no longer have the key, or vice versa.
     */
    if ((!cached_with_key && d_is_negative(dentry)) ||
        (!cached_with_key && dir_has_key) ||
        (cached_with_key && !dir_has_key))
        return 0;
    return 1;
}
const struct dentry_operations fscrypt_d_ops = {
.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);
/*
 * Counterpart of fscrypt_encrypt_page()'s bounce path: recover the ctx
 * stashed in the bounce page's private field, unlock the page, and
 * release the ctx (which also frees the bounce page).
 */
void fscrypt_restore_control_page(struct page *page)
{
    struct fscrypt_ctx *ctx;
    ctx = (struct fscrypt_ctx *)page_private(page);
    set_page_private(page, (unsigned long)NULL);
    ClearPagePrivate(page);
    unlock_page(page);
    fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);
/* Free all pooled contexts and the bounce-page mempool (init unwind). */
static void fscrypt_destroy(void)
{
    struct fscrypt_ctx *pos, *n;
    list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
        kmem_cache_free(fscrypt_ctx_cachep, pos);
    INIT_LIST_HEAD(&fscrypt_free_ctxs);
    mempool_destroy(fscrypt_bounce_page_pool);
    fscrypt_bounce_page_pool = NULL;
}
/**
* fscrypt_initialize() - allocate major buffers for fs encryption.
* @cop_flags: fscrypt operations flags
*
* We only call this when we start accessing encrypted files, since it
* results in memory getting allocated that wouldn't otherwise be used.
*
* Return: Zero on success, non-zero otherwise.
*/
int fscrypt_initialize(unsigned int cop_flags)
{
    int i, res = -ENOMEM;
    /*
     * No need to allocate a bounce page pool if there already is one or
     * this FS won't use it.
     */
    if (cop_flags & FS_CFLG_OWN_PAGES || fscrypt_bounce_page_pool)
        return 0;
    mutex_lock(&fscrypt_init_mutex);
    /* re-check under the mutex: another task may have initialized */
    if (fscrypt_bounce_page_pool)
        goto already_initialized;
    for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
        struct fscrypt_ctx *ctx;
        ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
        if (!ctx)
            goto fail;
        list_add(&ctx->free_list, &fscrypt_free_ctxs);
    }
    fscrypt_bounce_page_pool =
        mempool_create_page_pool(num_prealloc_crypto_pages, 0);
    if (!fscrypt_bounce_page_pool)
        goto fail;
already_initialized:
    mutex_unlock(&fscrypt_init_mutex);
    return 0;
fail:
    /* roll back any contexts already placed on the free list */
    fscrypt_destroy();
    mutex_unlock(&fscrypt_init_mutex);
    return res;
}
/**
* fscrypt_init() - Set up for fs encryption.
*/
static int __init fscrypt_init(void)
{
    /* high-priority workqueue: decryption sits on the read completion path */
    fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
                        WQ_HIGHPRI, 0);
    if (!fscrypt_read_workqueue)
        goto fail;
    fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
    if (!fscrypt_ctx_cachep)
        goto fail_free_queue;
    fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
    if (!fscrypt_info_cachep)
        goto fail_free_ctx;
    return 0;
fail_free_ctx:
    kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
    destroy_workqueue(fscrypt_read_workqueue);
fail:
    return -ENOMEM;
}
module_init(fscrypt_init)
/**
* fscrypt_exit() - Shutdown the fs encryption system
*/
static void __exit fscrypt_exit(void)
{
    /* free pooled ctxs and bounce pages first, then the infrastructure */
    fscrypt_destroy();
    if (fscrypt_read_workqueue)
        destroy_workqueue(fscrypt_read_workqueue);
    kmem_cache_destroy(fscrypt_ctx_cachep);
    kmem_cache_destroy(fscrypt_info_cachep);
}
module_exit(fscrypt_exit);
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3256_0 |
crossvul-cpp_data_bad_2561_0 | /*
misc.c : irssi
Copyright (C) 1999 Timo Sirainen
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "module.h"
#include "misc.h"
#include "commands.h"
typedef struct {
int condition;
GInputFunction function;
void *data;
} IRSSI_INPUT_REC;
/*
 * GLib watch callback: translate GIOCondition bits into irssi's
 * G_INPUT_READ/G_INPUT_WRITE and invoke the registered handler when the
 * condition matches what it asked for.  Always returns TRUE to keep the
 * watch installed.
 */
static int irssi_io_invoke(GIOChannel *source, GIOCondition condition,
               void *data)
{
    IRSSI_INPUT_REC *rec = data;
    int icond = 0;
    if (condition & (G_IO_ERR | G_IO_HUP | G_IO_NVAL)) {
        /* error, we have to call the function.. */
        if (rec->condition & G_IO_IN)
            icond |= G_INPUT_READ;
        else
            icond |= G_INPUT_WRITE;
    }
    if (condition & (G_IO_IN | G_IO_PRI))
        icond |= G_INPUT_READ;
    if (condition & G_IO_OUT)
        icond |= G_INPUT_WRITE;
    if (rec->condition & icond)
        rec->function(rec->data, source, icond);
    return TRUE;
}
/*
 * Register @function to be called when @source matches @condition
 * (G_INPUT_READ/G_INPUT_WRITE) at the given GLib priority.  The
 * IRSSI_INPUT_REC wrapper is freed by GLib when the watch is removed.
 * Returns the GLib event source id.
 */
int g_input_add_full(GIOChannel *source, int priority, int condition,
             GInputFunction function, void *data)
{
    IRSSI_INPUT_REC *rec;
    unsigned int result;
    GIOCondition cond;
    rec = g_new(IRSSI_INPUT_REC, 1);
    rec->condition = condition;
    rec->function = function;
    rec->data = data;
    /* always watch error conditions so the handler can observe failure */
    cond = (GIOCondition) (G_IO_ERR|G_IO_HUP|G_IO_NVAL);
    if (condition & G_INPUT_READ)
        cond |= G_IO_IN|G_IO_PRI;
    if (condition & G_INPUT_WRITE)
        cond |= G_IO_OUT;
    result = g_io_add_watch_full(source, priority, cond,
                    irssi_io_invoke, rec, g_free);
    return result;
}
/* Convenience wrapper for g_input_add_full() at default priority. */
int g_input_add(GIOChannel *source, int condition,
		GInputFunction function, void *data)
{
	return g_input_add_full(source, G_PRIORITY_DEFAULT, condition,
				function, data);
}
/* easy way to bypass glib polling of io channel internal buffer */
/* Wrap a raw fd in a temporary GIOChannel and install a watch on it;
   the channel is unreffed immediately — the watch added by
   g_input_add_full() keeps its own reference alive. */
int g_input_add_poll(int fd, int priority, int condition,
		     GInputFunction function, void *data)
{
	GIOChannel *source = g_io_channel_unix_new(fd);
	int ret = g_input_add_full(source, priority, condition, function, data);
	g_io_channel_unref(source);
	return ret;
}
/* qsort-style comparator for two GTimeVal timestamps: seconds are
   compared first, microseconds break ties.  Returns -1, 0 or 1. */
int g_timeval_cmp(const GTimeVal *tv1, const GTimeVal *tv2)
{
	if (tv1->tv_sec != tv2->tv_sec)
		return tv1->tv_sec < tv2->tv_sec ? -1 : 1;
	if (tv1->tv_usec != tv2->tv_usec)
		return tv1->tv_usec < tv2->tv_usec ? -1 : 1;
	return 0;
}
/* Difference tv1 - tv2 in milliseconds (sub-millisecond part truncated). */
long get_timeval_diff(const GTimeVal *tv1, const GTimeVal *tv2)
{
	long sec_part, usec_part;

	sec_part = tv1->tv_sec - tv2->tv_sec;
	usec_part = tv1->tv_usec - tv2->tv_usec;
	if (usec_part < 0) {
		/* borrow one second */
		sec_part--;
		usec_part += 1000000;
	}

	return sec_part * 1000 + usec_part / 1000;
}
/* Return TRUE if `item' appears as one of the space-separated words in
   `list' (case-insensitive, whole-word match only).  An empty item never
   matches. */
int find_substr(const char *list, const char *item)
{
	const char *ptr;

	g_return_val_if_fail(list != NULL, FALSE);
	g_return_val_if_fail(item != NULL, FALSE);

	if (*item == '\0')
		return FALSE;

	for (;;) {
		while (i_isspace(*list)) list++;
		if (*list == '\0') break;

		/* the current word ends at the next space or end of string */
		ptr = strchr(list, ' ');
		if (ptr == NULL) ptr = list+strlen(list);

		/* item[ptr-list] == '\0' ensures item isn't just a prefix */
		if (g_ascii_strncasecmp(list, item, ptr-list) == 0 &&
		    item[ptr-list] == '\0')
			return TRUE;

		list = ptr;
	}

	return FALSE;
}
/* Return the index of `item' in the NULL-terminated string array
   (case-insensitive), or -1 if it is not present. */
int strarray_find(char **array, const char *item)
{
	char **p;
	int idx;

	g_return_val_if_fail(array != NULL, -1);
	g_return_val_if_fail(item != NULL, -1);

	for (p = array, idx = 0; *p != NULL; p++, idx++) {
		if (g_ascii_strcasecmp(*p, item) == 0)
			return idx;
	}
	return -1;
}
/* Find the list node whose data string equals `key' (case-sensitive),
   or NULL if none does. */
GSList *gslist_find_string(GSList *list, const char *key)
{
	GSList *node;

	for (node = list; node != NULL; node = node->next) {
		if (g_strcmp0(node->data, key) == 0)
			return node;
	}
	return NULL;
}
/* Like gslist_find_string(), but the comparison ignores ASCII case. */
GSList *gslist_find_icase_string(GSList *list, const char *key)
{
	GSList *node;

	for (node = list; node != NULL; node = node->next) {
		if (g_ascii_strcasecmp(node->data, key) == 0)
			return node;
	}
	return NULL;
}
/* Call `func' on each element until it returns non-NULL; return that
   value, or NULL if the whole list was consumed without a hit. */
void *gslist_foreach_find(GSList *list, FOREACH_FIND_FUNC func, const void *data)
{
	for (; list != NULL; list = list->next) {
		void *result = func(list->data, (void *) data);
		if (result != NULL)
			return result;
	}
	return NULL;
}
/* Free every element's data with `free_func', then free the list
   itself.  A NULL list is a no-op. */
void gslist_free_full (GSList *list, GDestroyNotify free_func)
{
	GSList *node;

	if (list == NULL)
		return;

	for (node = list; node != NULL; node = node->next)
		free_func(node->data);

	g_slist_free(list);
}
/* Unlink the first node whose data equals `str' (case-sensitive) and
   return the new list head; the node's data is not freed. */
GSList *gslist_remove_string (GSList *list, const char *str)
{
	GSList *node;

	node = g_slist_find_custom(list, str, (GCompareFunc) g_strcmp0);
	return node != NULL ? g_slist_remove_link(list, node) : list;
}
/* `list' contains pointer to structure with a char* to string. */
/* Join the strings found at byte offset `offset' inside each element's
   struct, separated by `delimiter'.  Returns a newly allocated string
   the caller must g_free(). */
char *gslistptr_to_string(GSList *list, int offset, const char *delimiter)
{
	GString *str;
	char **data, *ret;

	str = g_string_new(NULL);
	while (list != NULL) {
		/* pointer to the char* member inside the element struct */
		data = G_STRUCT_MEMBER_P(list->data, offset);

		if (str->len != 0) g_string_append(str, delimiter);
		g_string_append(str, *data);
		list = list->next;
	}

	/* hand the internal buffer to the caller, free only the wrapper */
	ret = str->str;
	g_string_free(str, FALSE);
	return ret;
}
/* `list' contains char* elements: join them with `delimiter' into a
   newly allocated string the caller must g_free(). */
char *gslist_to_string(GSList *list, const char *delimiter)
{
	GString *buf;
	char *result;

	buf = g_string_new(NULL);
	for (; list != NULL; list = list->next) {
		if (buf->len != 0)
			g_string_append(buf, delimiter);
		g_string_append(buf, list->data);
	}

	/* keep the character data, free only the GString wrapper */
	result = buf->str;
	g_string_free(buf, FALSE);
	return result;
}
/* GHFunc-style helper: append the hash table key to *list; the value
   argument is ignored. */
void hash_save_key(char *key, void *value, GSList **list)
{
        *list = g_slist_append(*list, key);
}
/* remove all the options from the optlist hash table that are valid for the
 * command cmd */
/* Returns the remaining (unknown) option names as a GList of keys still
   owned by `optlist'; the caller frees only the list structure. */
GList *optlist_remove_known(const char *cmd, GHashTable *optlist)
{
	GList *list, *tmp, *next;

	list = g_hash_table_get_keys(optlist);
	if (cmd != NULL && list != NULL) {
		/* save `next' up front: g_list_remove may free `tmp' */
		for (tmp = list; tmp != NULL; tmp = next) {
			char *option = tmp->data;
			next = tmp->next;

			if (command_have_option(cmd, option))
				list = g_list_remove(list, option);
		}
	}

	return list;
}
/* GList counterpart of gslist_find_string(): exact string match. */
GList *glist_find_string(GList *list, const char *key)
{
	GList *node;

	for (node = list; node != NULL; node = node->next) {
		if (g_strcmp0(node->data, key) == 0)
			return node;
	}
	return NULL;
}
/* GList counterpart of gslist_find_icase_string(): ASCII-case-insensitive. */
GList *glist_find_icase_string(GList *list, const char *key)
{
	GList *node;

	for (node = list; node != NULL; node = node->next) {
		if (g_ascii_strcasecmp(node->data, key) == 0)
			return node;
	}
	return NULL;
}
/* Case-insensitive substring search: return a pointer into `data' where
   `key' first occurs, or NULL.  An empty key matches at the start. */
char *stristr(const char *data, const char *key)
{
	const char *max;
	int keylen, datalen, pos;

	keylen = strlen(key);
	datalen = strlen(data);
	if (keylen > datalen)
		return NULL;
	if (keylen == 0)
		return (char *) data;

	/* last position where a full-length match could still start;
	   guarantees data[pos] reads stay in bounds below */
	max = data+datalen-keylen;

	pos = 0;
	while (data <= max) {
		if (key[pos] == '\0')
			return (char *) data;

		/* extend the current candidate match, or restart one
		   byte further on the first mismatch */
		if (i_toupper(data[pos]) == i_toupper(key[pos]))
			pos++;
		else {
			data++;
			pos = 0;
		}
	}

	return NULL;
}
/* TRUE for ASCII whitespace/punctuation — a word boundary character. */
#define isbound(c) \
	((unsigned char) (c) < 128 && \
	(i_isspace(c) || i_ispunct(c)))

/* Substring search that only accepts whole-word matches: the match must
   start at the beginning of `data' or after a boundary char, and end at
   the end of `data' or before one.  `icase' selects case-insensitive
   comparison.  Returns a pointer into `data' or NULL. */
static char *strstr_full_case(const char *data, const char *key, int icase)
{
	const char *start, *max;
	int keylen, datalen, pos, match;

	keylen = strlen(key);
	datalen = strlen(data);
	if (keylen > datalen)
		return NULL;
	if (keylen == 0)
		return (char *) data;

	/* last position where a full-length match could still begin */
	max = data+datalen-keylen;

	start = data; pos = 0;
	while (data <= max) {
		if (key[pos] == '\0') {
			/* whole key matched — verify the right-hand boundary */
			if (data[pos] != '\0' && !isbound(data[pos])) {
				data++;
				pos = 0;
				continue;
			}
			return (char *) data;
		}

		match = icase ? (i_toupper(data[pos]) == i_toupper(key[pos])) :
			data[pos] == key[pos];

		/* a match may only start at the string start or after a
		   boundary character (checked when pos == 0) */
		if (match && (pos != 0 || data == start || isbound(data[-1])))
			pos++;
		else {
			data++;
			pos = 0;
		}
	}

	return NULL;
}
/* Case-sensitive whole-word substring search. */
char *strstr_full(const char *data, const char *key)
{
	return strstr_full_case(data, key, FALSE);
}
/* Case-insensitive whole-word substring search. */
char *stristr_full(const char *data, const char *key)
{
	return strstr_full_case(data, key, TRUE);
}
/* Expand a leading "~" or "~/" to the user's home directory; any other
   path is returned unchanged.  Result is newly allocated. */
char *convert_home(const char *path)
{
	const char *home;

	if (path[0] != '~' || (path[1] != '/' && path[1] != '\0'))
		return g_strdup(path);

	home = g_get_home_dir();
	if (home == NULL)
		home = ".";

	return g_strconcat(home, path + 1, NULL);
}
/* GEqualFunc: ASCII-case-insensitive string equality. */
int g_istr_equal(gconstpointer v, gconstpointer v2)
{
	const char *a = v, *b = v2;

	return g_ascii_strcasecmp(a, b) == 0;
}
/* GCompareFunc: ASCII-case-insensitive string ordering. */
int g_istr_cmp(gconstpointer v, gconstpointer v2)
{
	const char *a = v, *b = v2;

	return g_ascii_strcasecmp(a, b);
}
/* GHashFunc: case-insensitive djb2-style string hash (h*33 + upper(c)),
   pairing with g_istr_equal() above. */
guint g_istr_hash(gconstpointer v)
{
	const signed char *p;
	guint32 h = 5381;

	for (p = v; *p != '\0'; p++)
		h = (h << 5) + h + g_ascii_toupper(*p);

	return h;
}
/* Find `mask' from `data', you can use * and ? wildcards. */
/* Case-insensitive glob match of `data' against `cmask'.  Works on a
   private copy of the mask because it temporarily NUL-terminates mask
   segments in place while searching for them in `data'. */
int match_wildcards(const char *cmask, const char *data)
{
	char *mask, *newmask, *p1, *p2;
	int ret;

	newmask = mask = g_strdup(cmask);
	for (; *mask != '\0' && *data != '\0'; mask++) {
		if (*mask != '*') {
			/* literal or '?': must match one data character */
			if (*mask != '?' && i_toupper(*mask) != i_toupper(*data))
				break;
			data++;
			continue;
		}

		/* collapse a run of wildcards */
		while (*mask == '?' || *mask == '*') mask++;
		if (*mask == '\0') {
			/* trailing '*' swallows the rest of data */
			data += strlen(data);
			break;
		}

		/* cut the mask at the next wildcard so stristr() only
		   sees the literal segment, then restore the char */
		p1 = strchr(mask, '*');
		p2 = strchr(mask, '?');
		if (p1 == NULL || (p2 < p1 && p2 != NULL)) p1 = p2;
		if (p1 != NULL) *p1 = '\0';

		data = stristr(data, mask);
		if (data == NULL) break;

		data += strlen(mask);
		mask += strlen(mask)-1;

		if (p1 != NULL) *p1 = p1 == p2 ? '?' : '*';
	}

	/* trailing '*'s in the mask are allowed to match nothing */
	while (*mask == '*') mask++;

	ret = data != NULL && *data == '\0' && *mask == '\0';
	g_free(newmask);
	return ret;
}
/* Return TRUE if every character of `str' before `end_char' (or the
   terminating NUL) is a digit.  An empty run counts as FALSE. */
int is_numeric(const char *str, char end_char)
{
	g_return_val_if_fail(str != NULL, FALSE);

	if (*str == '\0' || *str == end_char)
		return FALSE;

	for (; *str != '\0' && *str != end_char; str++) {
		if (!i_isdigit(*str))
			return FALSE;
	}
	return TRUE;
}
/* Replace every occurrence of `from' in `str' with `to', in place.
   Returns `str' for convenience. */
char *replace_chars(char *str, char from, char to)
{
	char *cur;

	for (cur = str; *cur != '\0'; cur++) {
		if (*cur == from)
			*cur = to;
	}
	return str;
}
/* Interpret a decimal-rendered octal number (e.g. 777) as its decimal
   value (511): each base-10 digit is weighted by a power of eight. */
int octal2dec(int octal)
{
	int result = 0, place = 1;

	while (octal != 0) {
		result += place * (octal % 10);
		octal /= 10;
		place *= 8;
	}
	return result;
}
/* Convert a decimal value to its octal representation rendered as a
 * decimal integer (8 -> 10, 511 -> 777) — the inverse of octal2dec().
 *
 * Fix: the old code advanced the digit position with `pos += 10',
 * producing place multipliers 1, 10, 20, 30, ... instead of powers of
 * ten, so every value with three or more octal digits was wrong
 * (dec2octal(64) returned 20 instead of 100).  The place value must be
 * multiplied by ten per digit. */
int dec2octal(int decimal)
{
	int octal, place;

	octal = 0; place = 1;
	while (decimal > 0) {
		octal += (decimal & 7) * place;
		decimal /= 8;
		place *= 10;
	}

	return octal;
}
/* string -> uoff_t */
/* Parse a non-negative decimal string into the project's uoff_t file
   offset type, using the wider strtoull() when uoff_t is long long.
   No error reporting — invalid input yields 0, like strtoul(). */
uoff_t str_to_uofft(const char *str)
{
#ifdef UOFF_T_LONG_LONG
	return (uoff_t)strtoull(str, NULL, 10);
#else
	return (uoff_t)strtoul(str, NULL, 10);
#endif
}
/* convert all low-ascii (<32) to ^<A..> combinations */
char *show_lowascii(const char *str)
{
char *ret, *p;
ret = p = g_malloc(strlen(str)*2+1);
while (*str != '\0') {
if ((unsigned char) *str >= 32)
*p++ = *str;
else {
*p++ = '^';
*p++ = *str + 'A'-1;
}
str++;
}
*p = '\0';
return ret;
}
/* Get time in human readable form with localtime() + asctime().
 * Returns a newly allocated string (caller must g_free()) with
 * asctime()'s trailing newline stripped.
 *
 * Fix: localtime() can return NULL (e.g. for a timestamp that does not
 * fit in struct tm); the old code passed that NULL straight into
 * asctime(), a NULL pointer dereference.  Return a placeholder string
 * for unrepresentable times instead. */
char *my_asctime(time_t t)
{
	struct tm *tm;
	char *str;
	int len;

	tm = localtime(&t);
	if (tm == NULL)
		return g_strdup("???");

	str = g_strdup(asctime(tm));

	len = strlen(str);
	if (len > 0) str[len-1] = '\0';
	return str;
}
/* Returns number of columns needed to print items.
   save_column_widths is filled with length of each column. */
/* Tries every candidate column count from 1..max_columns-1 in parallel,
   accumulating per-column maximum widths, then picks the largest count
   whose total width fits `max_width'.  *save_column_widths is a newly
   allocated array of `ret' widths (caller frees); *rows receives the
   row count for that layout. */
int get_max_column_count(GSList *items, COLUMN_LEN_FUNC len_func,
			 int max_width, int max_columns,
			 int item_extra, int item_min_size,
			 int **save_column_widths, int *rows)
{
	GSList *tmp;
	int **columns, *columns_width, *columns_rows;
	int item_pos, items_count;
	int ret, len, max_len, n, col;

	items_count = g_slist_length(items);
	if (items_count == 0) {
		*save_column_widths = NULL;
		*rows = 0;
		return 0;
	}

	/* cap max_columns by how many minimum-width items can fit */
	len = max_width/(item_extra+item_min_size);
	if (len <= 0) len = 1;
	if (max_columns <= 0 || len < max_columns)
		max_columns = len;

	/* columns[n] holds the per-column widths for the (n+1)-column
	   layout; columns_rows[n] its row count (ceil division) */
	columns = g_new0(int *, max_columns);
	columns_width = g_new0(int, max_columns);
	columns_rows = g_new0(int, max_columns);

	for (n = 1; n < max_columns; n++) {
		columns[n] = g_new0(int, n+1);
		columns_rows[n] = items_count <= n+1 ? 1 :
			(items_count+n)/(n+1);
	}

	/* for each possible column count, save the column widths and
	   find the biggest column count that fits to screen. */
	item_pos = 0; max_len = 0;
	for (tmp = items; tmp != NULL; tmp = tmp->next) {
		len = item_extra+len_func(tmp->data);
		if (max_len < len)
			max_len = len;

		for (n = 1; n < max_columns; n++) {
			if (columns_width[n] > max_width)
				continue; /* too wide */

			/* which column this item lands in for layout n */
			col = item_pos/columns_rows[n];
			if (columns[n][col] < len) {
				columns_width[n] += len-columns[n][col];
				columns[n][col] = len;
			}
		}

		item_pos++;
	}

	/* pick the widest layout that fits and actually fills its
	   last column */
	for (n = max_columns-1; n >= 1; n--) {
		if (columns_width[n] <= max_width &&
		    columns[n][n] > 0)
			break;
	}
	ret = n+1;

	*save_column_widths = g_new(int, ret);
	if (ret == 1) {
		**save_column_widths = max_len;
		*rows = 1;
	} else {
		memcpy(*save_column_widths, columns[ret-1], sizeof(int)*ret);
		*rows = columns_rows[ret-1];
	}

	for (n = 1; n < max_columns; n++)
		g_free(columns[n]);
	g_free(columns_width);
	g_free(columns_rows);
	g_free(columns);

	return ret;
}
/* Return a column sorted copy of a list. */
/* Reorders a row-major list into column-major order for `rows' rows by
   taking every rows-th element starting at each row offset.  Returns a
   new list sharing the original element data. */
GSList *columns_sort_list(GSList *list, int rows)
{
	GSList *tmp, *sorted;
	int row, skip;

	if (list == NULL || rows == 0)
		return list;

	sorted = NULL;

	for (row = 0; row < rows; row++) {
		/* start at element `row', then step by `rows' */
		tmp = g_slist_nth(list, row);

		skip = 1;
		for (; tmp != NULL; tmp = tmp->next) {
			if (--skip == 0) {
				skip = rows;
				sorted = g_slist_append(sorted, tmp->data);
			}
		}
	}

	/* sanity check: the copy must contain every element exactly once */
	g_return_val_if_fail(g_slist_length(sorted) ==
			     g_slist_length(list), sorted);
	return sorted;
}
/* Expand escape string, the first character in data should be the
   one after '\'. Returns the expanded character or -1 if error. */
/* On success *data is advanced to the LAST character consumed of the
   escape sequence (the caller is expected to step past it). */
int expand_escape(const char **data)
{
	char digit[4];

	switch (**data) {
	case 't':
		return '\t';
	case 'r':
		return '\r';
	case 'n':
		return '\n';
	case 'e':
		return 27; /* ESC */
	case '\\':
		return '\\';
	case 'x':
		/* hex digit */
		if (!i_isxdigit((*data)[1]) || !i_isxdigit((*data)[2]))
			return -1;

		digit[0] = (*data)[1];
		digit[1] = (*data)[2];
		digit[2] = '\0';
		*data += 2;
		return strtol(digit, NULL, 16);
	case 'c':
		/* control character (\cA = ^A) */
		(*data)++;
		return i_toupper(**data) - 64;
	case '0': case '1': case '2': case '3':
	case '4': case '5': case '6': case '7':
		/* octal: one to three octal digits */
		digit[1] = digit[2] = digit[3] = '\0';
		digit[0] = (*data)[0];
		if ((*data)[1] >= '0' && (*data)[1] <= '7') {
			++*data;
			digit[1] = **data;
			if ((*data)[1] >= '0' && (*data)[1] <= '7') {
				++*data;
				digit[2] = **data;
			}
		}
		return strtol(digit, NULL, 8);
	default:
		return -1;
	}
}
/* Return a newly allocated copy of `str' with every '"', '\'' and '\\'
   preceded by a backslash.  Caller g_free()s the result. */
char *escape_string(const char *str)
{
	char *out, *dst;

	/* worst case: every character needs an escape */
	dst = out = g_malloc(strlen(str)*2+1);
	for (; *str != '\0'; str++) {
		if (*str == '"' || *str == '\'' || *str == '\\')
			*dst++ = '\\';
		*dst++ = *str;
	}
	*dst = '\0';

	return out;
}
/* Smallest power of two that is >= num (returns 1 for num <= 1). */
int nearest_power(int num)
{
	int power;

	for (power = 1; power < num; power <<= 1)
		;
	return power;
}
/* Parses unsigned integers from strings with decent error checking.
 * Returns true on success, false otherwise (overflow, no valid number, etc)
 * There's a 31 bit limit so the output can be assigned to signed positive ints */
/* `endptr' and `number' may each be NULL if the caller doesn't need them. */
int parse_uint(const char *nptr, char **endptr, int base, guint *number)
{
	char *endptr_;
	gulong parsed;

	/* strtoul accepts whitespace and plus/minus signs, for some reason */
	if (!i_isdigit(*nptr)) {
		return FALSE;
	}

	/* clear errno so a stale ERANGE can't masquerade as overflow */
	errno = 0;
	parsed = strtoul(nptr, &endptr_, base);
	if (errno || endptr_ == nptr || parsed >= (1U << 31)) {
		return FALSE;
	}

	if (endptr) {
		*endptr = endptr_;
	}

	if (number) {
		*number = (guint) parsed;
	}

	return TRUE;
}
/* Skip leading whitespace and an optional '-'; report the resulting
   sign (+1/-1) and where the number itself starts.  Always succeeds. */
static int parse_number_sign(const char *input, char **endptr, int *sign)
{
	int result = 1;

	while (i_isspace(*input))
		input++;

	if (*input == '-') {
		result = -result;
		input++;
	}

	*sign = result;
	*endptr = (char *) input;
	return TRUE;
}
/* Parse a human-readable duration like "1 day 2 hours 30 mins" into
   milliseconds.  Units may be abbreviated (matched by prefix); a bare
   trailing number is taken as seconds.  Returns TRUE on success. */
static int parse_time_interval_uint(const char *time, guint *msecs)
{
	const char *desc;
	guint number;
	int len, ret, digits;

	*msecs = 0;

	/* max. return value is around 24 days */
	number = 0; ret = TRUE; digits = FALSE;
	while (i_isspace(*time))
		time++;
	for (;;) {
		if (i_isdigit(*time)) {
			char *endptr;
			if (!parse_uint(time, &endptr, 10, &number)) {
				return FALSE;
			}
			time = endptr;
			digits = TRUE;
			continue;
		}

		/* a unit without a preceding number is an error */
		if (!digits)
			return FALSE;

		/* skip punctuation */
		while (*time != '\0' && i_ispunct(*time) && *time != '-')
			time++;

		/* get description */
		for (len = 0, desc = time; i_isalpha(*time); time++)
			len++;

		while (i_isspace(*time))
			time++;

		if (len == 0) {
			if (*time != '\0')
				return FALSE;
			*msecs += number * 1000; /* assume seconds */
			return TRUE;
		}

		if (g_ascii_strncasecmp(desc, "days", len) == 0) {
			if (number > 24) {
				/* would overflow */
				return FALSE;
			}
			*msecs += number * 1000*3600*24;
		} else if (g_ascii_strncasecmp(desc, "hours", len) == 0)
			*msecs += number * 1000*3600;
		else if (g_ascii_strncasecmp(desc, "minutes", len) == 0 ||
			 g_ascii_strncasecmp(desc, "mins", len) == 0)
			*msecs += number * 1000*60;
		else if (g_ascii_strncasecmp(desc, "seconds", len) == 0 ||
			 g_ascii_strncasecmp(desc, "secs", len) == 0)
			*msecs += number * 1000;
		else if (g_ascii_strncasecmp(desc, "milliseconds", len) == 0 ||
			 g_ascii_strncasecmp(desc, "millisecs", len) == 0 ||
			 g_ascii_strncasecmp(desc, "mseconds", len) == 0 ||
			 g_ascii_strncasecmp(desc, "msecs", len) == 0)
			*msecs += number;
		else {
			ret = FALSE;
		}

		/* skip punctuation */
		while (*time != '\0' && i_ispunct(*time) && *time != '-')
			time++;

		if (*time == '\0')
			break;

		/* reset for the next number/unit pair */
		number = 0;
		digits = FALSE;
	}

	return ret;
}
/* Parse a human-readable size like "10 mbytes" into bytes.  Units may
   be abbreviated; each unit has an overflow limit relative to 2^31.
   Returns TRUE on success.
   NOTE(review): a trailing number without a unit still adds `number*1024'
   to *bytes but returns FALSE — looks intentional (units required except
   for plain "0"); confirm against callers before changing. */
static int parse_size_uint(const char *size, guint *bytes)
{
	const char *desc;
	guint number, multiplier, limit;
	int len;

	*bytes = 0;

	/* max. return value is about 1.6 years */
	number = 0;
	while (*size != '\0') {
		if (i_isdigit(*size)) {
			char *endptr;
			if (!parse_uint(size, &endptr, 10, &number)) {
				return FALSE;
			}
			size = endptr;
			continue;
		}

		/* skip punctuation */
		while (*size != '\0' && i_ispunct(*size))
			size++;

		/* get description */
		for (len = 0, desc = size; i_isalpha(*size); size++)
			len++;

		if (len == 0) {
			if (number == 0) {
				/* "0" - allow it */
				return TRUE;
			}

			*bytes += number*1024; /* assume kilobytes */
			return FALSE;
		}

		/* limit = largest count of this unit below 2^31 bytes */
		multiplier = 0;
		limit = 0;
		if (g_ascii_strncasecmp(desc, "gbytes", len) == 0) {
			multiplier = 1U << 30;
			limit = 2U << 0;
		}
		if (g_ascii_strncasecmp(desc, "mbytes", len) == 0) {
			multiplier = 1U << 20;
			limit = 2U << 10;
		}
		if (g_ascii_strncasecmp(desc, "kbytes", len) == 0) {
			multiplier = 1U << 10;
			limit = 2U << 20;
		}
		if (g_ascii_strncasecmp(desc, "bytes", len) == 0) {
			multiplier = 1;
			limit = 2U << 30;
		}

		if (limit && number > limit) {
			return FALSE;
		}

		*bytes += number * multiplier;

		/* skip punctuation */
		while (*size != '\0' && i_ispunct(*size))
			size++;
	}

	return TRUE;
}
/* Parse a human-readable size string into a byte count stored in a
 * signed int.  Returns TRUE on success.
 *
 * Fix: the old guard was `bytes_ > (1U << 31)', so a parsed value of
 * exactly 2^31 slipped through and overflowed the signed int output
 * (implementation-defined conversion).  Reject anything that does not
 * fit in a positive int with `>='. */
int parse_size(const char *size, int *bytes)
{
	guint bytes_;
	int ret;

	ret = parse_size_uint(size, &bytes_);
	if (bytes_ >= (1U << 31)) {
		return FALSE;
	}

	*bytes = (int) bytes_;
	return ret;
}
/* Parse a (possibly negative) human-readable time interval into signed
 * milliseconds.  Returns TRUE on success.
 *
 * Fix: the old guard was `msecs_ > (1U << 31)', letting exactly 2^31
 * through into the signed int; the multiplication `msecs_ * sign' was
 * also performed in unsigned arithmetic.  Reject with `>=' and do the
 * sign application on a value already known to fit in int. */
int parse_time_interval(const char *time, int *msecs)
{
	guint msecs_;
	char *number;
	int ret, sign;

	parse_number_sign(time, &number, &sign);

	ret = parse_time_interval_uint(number, &msecs_);
	if (msecs_ >= (1U << 31)) {
		return FALSE;
	}

	*msecs = (int) msecs_ * sign;
	return ret;
}
/* Uppercase `str' in place (ASCII only); returns `str'. */
char *ascii_strup(char *str)
{
	char *p = str;

	while (*p) {
		*p = g_ascii_toupper (*p);
		p++;
	}
	return str;
}
/* Lowercase `str' in place (ASCII only); returns `str'. */
char *ascii_strdown(char *str)
{
	char *p = str;

	while (*p) {
		*p = g_ascii_tolower (*p);
		p++;
	}
	return str;
}
/* Split `str' into a NULL-terminated array of chunks of at most `len'
   bytes each (caller frees with g_strfreev()).  With `onspace' set, a
   chunk is preferably cut at the last space within the limit, and the
   space itself stays at the end of the earlier chunk.
   NOTE(review): assumes len > 0 — a non-positive len would make offset 0
   and loop forever.  Also mixes int `len' with size_t strlen() results;
   fine for sane lengths, confirm callers never pass len <= 0. */
char **strsplit_len(const char *str, int len, gboolean onspace)
{
	char **ret = g_new(char *, 1);
	int n;
	int offset;

	for (n = 0; *str != '\0'; n++, str += offset) {
		offset = MIN(len, strlen(str));
		if (onspace && strlen(str) > len) {
			/*
			 * Try to find a space to split on and leave
			 * the space on the previous line.
			 */
			int i;
			for (i = len - 1; i > 0; i--) {
				if (str[i] == ' ') {
					offset = i;
					break;
				}
			}
		}
		ret[n] = g_strndup(str, offset);
		/* grow to hold the next chunk plus the NULL terminator */
		ret = g_renew(char *, ret, n + 2);
	}
	ret[n] = NULL;

	return ret;
}
/* Render `size' bytes of `buffer' as colon-separated uppercase hex
 * ("DE:AD:BE:EF").  Returns a newly allocated string (g_free() it) or
 * NULL for a NULL/empty buffer.
 *
 * Fix: the loop index was a signed int compared against the size_t
 * `size' — for buffers larger than INT_MAX the comparison promotes and
 * the increment eventually overflows (undefined behavior).  Use size_t
 * for the index. */
char *binary_to_hex(unsigned char *buffer, size_t size)
{
	static const char hex[] = "0123456789ABCDEF";
	char *result = NULL;
	size_t i;

	if (buffer == NULL || size == 0)
		return NULL;

	/* 2 hex digits plus a ':' (or the final NUL) per input byte */
	result = g_malloc(3 * size);

	for (i = 0; i < size; i++) {
		result[i * 3 + 0] = hex[(buffer[i] >> 4) & 0xf];
		result[i * 3 + 1] = hex[(buffer[i] >> 0) & 0xf];
		result[i * 3 + 2] = i == size - 1 ? '\0' : ':';
	}

	return result;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_2561_0 |
/*
* Software multibuffer async crypto daemon.
*
* Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
*
* Adapted from crypto daemon.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
unsigned int tail);
struct mcryptd_flush_list {
struct list_head list;
struct mutex lock;
};
static struct mcryptd_flush_list __percpu *mcryptd_flist;
struct hashd_instance_ctx {
struct crypto_ahash_spawn spawn;
struct mcryptd_queue *queue;
};
static void mcryptd_queue_worker(struct work_struct *work);
/* Put `cstate' on this CPU's flush list (if not already engaged) and
   schedule its delayed flush work after `delay' jiffies. */
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
				      &cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

/* Allocate and initialize the per-CPU request queues; returns 0 or
   -ENOMEM. */
static int mcryptd_init_queue(struct mcryptd_queue *queue,
			      unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
	}
	return 0;
}

/* Free the per-CPU queues; every queue must already be drained. */
static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

/* Enqueue `request' on the current CPU's queue (recording the CPU in
   the request context) and kick that CPU's worker.  Preemption is held
   off via get_cpu() so the queue and the kick target stay consistent. */
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	rctx->tag.cpu = cpu;

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}
/*
 * Try to opportunisticlly flush the partially completed jobs if
 * crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		/* stop when the list is empty or the head is no longer
		   armed for flushing */
		cstate = list_first_entry_or_null(&flist->list,
				struct mcryptd_alg_cstate, flush_list);
		if (!cstate || !cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		/* run the algorithm's flusher outside the list lock */
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context, do one real cryption work (via
 * req->complete) and reschedule itself if there are more work to
 * do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer to
	 * be effective.
	 */

	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {
		/*
		 * preempt_disable/enable is used to prevent
		 * being preempted by mcryptd_enqueue_request()
		 */
		local_bh_disable();
		preempt_disable();
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		preempt_enable();
		local_bh_enable();

		if (!req) {
			/* queue drained — give partial jobs a chance */
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	/* batch exhausted but work remains: requeue ourselves */
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

/* Delayed-work handler: if this cstate is still armed, take it off the
   flush list and invoke the algorithm's flusher. */
void mcryptd_flusher(struct work_struct *__work)
{
	struct  mcryptd_alg_cstate	*alg_cpu_state;
	struct  mcryptd_alg_state	*alg_state;
	struct  mcryptd_flush_list	*flist;
	int	cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
			 cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);
/* Fetch the mcryptd queue stored in the tfm's instance context. */
static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

/* Allocate a crypto_instance with `head' bytes before it and `tail'
   bytes of context after it; fills in the "mcryptd(...)" names and
   copies priority/blocksize/alignmask from the wrapped algorithm.
   Returns the raw allocation pointer or an ERR_PTR. */
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		    "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	/* outrank the wrapped implementation so mcryptd is preferred */
	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

/* Propagate the CRYPTO_ALG_INTERNAL bit from the template attributes
   into the lookup type/mask.
   NOTE(review): this only forwards the flag; upstream later hardened
   this path to reject incompatible (non-internal) underlying algorithms
   (CVE-2016-10147) — confirm which kernel revision this tree tracks. */
static inline void mcryptd_check_internal(struct rtattr **tb, u32 *type,
					  u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return;
	if ((algt->type & CRYPTO_ALG_INTERNAL))
		*type |= CRYPTO_ALG_INTERNAL;
	if ((algt->mask & CRYPTO_ALG_INTERNAL))
		*mask |= CRYPTO_ALG_INTERNAL;
}
/* tfm init: instantiate the wrapped (child) ahash from the spawn and
   size our request context to hold its request too. */
static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_ahash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *hash;

	hash = crypto_spawn_ahash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_ahash_reqsize(hash));
	return 0;
}

/* tfm exit: release the child ahash. */
static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(ctx->child);
}

/* Forward setkey to the child, mirroring the request flags down and the
   result flags back up. */
static int mcryptd_hash_setkey(struct crypto_ahash *parent,
				   const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx   = crypto_ahash_ctx(parent);
	struct crypto_ahash *child = ctx->child;
	int err;

	crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

/* Stash the caller's completion, substitute our own, and push the
   request onto the per-CPU mcryptd queue. */
static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}
/* Worker-side init: run crypto_ahash_init() on the child request, then
   call the caller's completion with BHs disabled. */
static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_ahash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct ahash_request *desc = &rctx->areq;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	ahash_request_set_tfm(desc, child);
	ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
						rctx->complete, req_async);

	rctx->out = req->result;
	err = crypto_ahash_init(desc);

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

/* User-facing init: defer to the worker via the queue. */
static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

/* Worker-side update: on success the completion fires asynchronously;
   on error, restore the original completion and call it directly. */
static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = ahash_mcryptd_update(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

/* Worker-side final: same error/async pattern as update. */
static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = ahash_mcryptd_final(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

/* Worker-side finup: same error/async pattern as update. */
static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	rctx->out = req->result;
	err = ahash_mcryptd_finup(&rctx->areq);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

/* Worker-side digest: set up the child request like init, then run the
   full init+finup sequence on it. */
static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_ahash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct ahash_request *desc = &rctx->areq;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	ahash_request_set_tfm(desc, child);
	ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
						rctx->complete, req_async);

	rctx->out = req->result;
	err = ahash_mcryptd_digest(desc);

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

/* Export/import the partial state of the internal child request. */
static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_ahash_export(&rctx->areq, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_ahash_import(&rctx->areq, in);
}
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
struct mcryptd_queue *queue)
{
struct hashd_instance_ctx *ctx;
struct ahash_instance *inst;
struct hash_alg_common *halg;
struct crypto_alg *alg;
u32 type = 0;
u32 mask = 0;
int err;
mcryptd_check_internal(tb, &type, &mask);
halg = ahash_attr_alg(tb[1], type, mask);
if (IS_ERR(halg))
return PTR_ERR(halg);
alg = &halg->base;
pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
sizeof(*ctx));
err = PTR_ERR(inst);
if (IS_ERR(inst))
goto out_put_alg;
ctx = ahash_instance_ctx(inst);
ctx->queue = queue;
err = crypto_init_ahash_spawn(&ctx->spawn, halg,
ahash_crypto_instance(inst));
if (err)
goto out_free_inst;
type = CRYPTO_ALG_ASYNC;
if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
type |= CRYPTO_ALG_INTERNAL;
inst->alg.halg.base.cra_flags = type;
inst->alg.halg.digestsize = halg->digestsize;
inst->alg.halg.statesize = halg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;
inst->alg.init = mcryptd_hash_init_enqueue;
inst->alg.update = mcryptd_hash_update_enqueue;
inst->alg.final = mcryptd_hash_final_enqueue;
inst->alg.finup = mcryptd_hash_finup_enqueue;
inst->alg.export = mcryptd_hash_export;
inst->alg.import = mcryptd_hash_import;
inst->alg.setkey = mcryptd_hash_setkey;
inst->alg.digest = mcryptd_hash_digest_enqueue;
err = ahash_register_instance(tmpl, inst);
if (err) {
crypto_drop_ahash(&ctx->spawn);
out_free_inst:
kfree(inst);
}
out_put_alg:
crypto_mod_put(alg);
return err;
}
static struct mcryptd_queue mqueue;
static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_DIGEST:
return mcryptd_create_hash(tmpl, tb, &mqueue);
break;
}
return -EINVAL;
}
/*
 * mcryptd_free - template instance destructor.
 *
 * ctx and hctx both point at the same instance context memory (both come
 * from crypto_instance_ctx(inst)); only the interpretation differs, chosen
 * here by the algorithm type recorded in inst->alg.cra_flags.
 */
static void mcryptd_free(struct crypto_instance *inst)
{
        struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                /* Drop the spawned child hash, free the enclosing ahash instance. */
                crypto_drop_ahash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}
/* The "mcryptd" crypto template, instantiated on demand by the crypto API. */
static struct crypto_template mcryptd_tmpl = {
        .name = "mcryptd",
        .create = mcryptd_create,
        .free = mcryptd_free,
        .module = THIS_MODULE,
};
/*
 * mcryptd_alloc_ahash - allocate an mcryptd-wrapped ahash transform.
 * @alg_name: name of the underlying hash algorithm
 * @type, @mask: crypto API type/mask flags passed through to allocation
 *
 * Builds the "mcryptd(<alg>)" driver name, allocates the ahash and then
 * verifies the resulting algorithm is really provided by this module
 * (rejects lookalike registrations).  Returns an ERR_PTR on failure.
 */
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
                                          u32 type, u32 mask)
{
        char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_ahash *tfm;

        /* snprintf result >= size means the name was truncated: reject. */
        if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
/* One-shot digest on the child request: init, then finup. */
int ahash_mcryptd_digest(struct ahash_request *desc)
{
        return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
}
/* Forward an update to the child request. */
int ahash_mcryptd_update(struct ahash_request *desc)
{
        /* alignment is to be done by multi-buffer crypto algorithm if needed */
        return crypto_ahash_update(desc);
}
/* Forward a finup (final update + digest) to the child request. */
int ahash_mcryptd_finup(struct ahash_request *desc)
{
        /* alignment is to be done by multi-buffer crypto algorithm if needed */
        return crypto_ahash_finup(desc);
}
/* Forward a final to the child request. */
int ahash_mcryptd_final(struct ahash_request *desc)
{
        /* alignment is to be done by multi-buffer crypto algorithm if needed */
        return crypto_ahash_final(desc);
}
/* Return the inner (child) ahash transform wrapped by @tfm. */
struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
        struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);
/* Return the child ahash request embedded in @req's request context. */
struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return &rctx->areq;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);
/* Free a transform previously obtained from mcryptd_alloc_ahash(). */
void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
        crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
/*
 * mcryptd_init - module init: allocate the per-CPU flush lists, set up
 * the per-CPU request queue, then register the "mcryptd" template.
 * Returns 0 on success or a negative errno; all partially-acquired
 * resources are released on failure.
 */
static int __init mcryptd_init(void)
{
        int err, cpu;
        struct mcryptd_flush_list *flist;

        mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
        /*
         * Bug fix: alloc_percpu() can fail; without this check the
         * per_cpu_ptr() loop below dereferences a NULL per-CPU pointer.
         */
        if (!mcryptd_flist)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                flist = per_cpu_ptr(mcryptd_flist, cpu);
                INIT_LIST_HEAD(&flist->list);
                mutex_init(&flist->lock);
        }

        err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
        if (err) {
                free_percpu(mcryptd_flist);
                return err;
        }

        err = crypto_register_template(&mcryptd_tmpl);
        if (err) {
                mcryptd_fini_queue(&mqueue);
                free_percpu(mcryptd_flist);
        }

        return err;
}
/* Module teardown: drain/release the queue, unregister the template,
 * and free the per-CPU flush lists (reverse of mcryptd_init). */
static void __exit mcryptd_exit(void)
{
        mcryptd_fini_queue(&mqueue);
        crypto_unregister_template(&mcryptd_tmpl);
        free_percpu(mcryptd_flist);
}
/* subsys-level initcall (earlier than a plain module_init). */
subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_4816_0 |
crossvul-cpp_data_bad_3959_1 | // SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
* Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
* Copyright (c) 2002, 2003 Tuukka Toivonen
* Copyright (c) 2008 Erik Andrén
*
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
* P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
* P/N 861075-0040: Sensor HDCS1000 ASIC
* P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
* P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
*/
/*
* The spec file for the PB-0100 suggests the following for best quality
* images after the sensor has been reset :
*
* PB_ADCGAINL = R60 = 0x03 (3 dec) : sets low reference of ADC
to produce good black level
* PB_PREADCTRL = R32 = 0x1400 (5120 dec) : Enables global gain changes
through R53
* PB_ADCMINGAIN = R52 = 0x10 (16 dec) : Sets the minimum gain for
auto-exposure
* PB_ADCGLOBALGAIN = R53 = 0x10 (16 dec) : Sets the global gain
* PB_EXPGAIN = R14 = 0x11 (17 dec) : Sets the auto-exposure value
* PB_UPDATEINT = R23 = 0x02 (2 dec) : Sets the speed on
auto-exposure routine
* PB_CFILLIN = R5 = 0x0E (14 dec) : Sets the frame rate
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "stv06xx_pb0100.h"
/* Per-sensor control handles, stored in sd->sensor_priv.  The anonymous
 * struct groups the controls passed to v4l2_ctrl_auto_cluster() in
 * pb0100_init_controls(); "target" (autogain target) stands alone. */
struct pb0100_ctrls {
        struct { /* one big happy control cluster... */
                struct v4l2_ctrl *autogain;
                struct v4l2_ctrl *gain;
                struct v4l2_ctrl *exposure;
                struct v4l2_ctrl *red;
                struct v4l2_ctrl *blue;
                struct v4l2_ctrl *natural;
        };
        struct v4l2_ctrl *target;
};
/* Supported frame formats (8-bit Bayer GRBG, one byte per pixel). */
static struct v4l2_pix_format pb0100_mode[] = {
/* low res / subsample modes disabled as they are only half res horizontal,
   halving the vertical resolution does not seem to work */
        {
                320,                     /* width */
                240,                     /* height */
                V4L2_PIX_FMT_SGRBG8,
                V4L2_FIELD_NONE,
                .sizeimage = 320 * 240,
                .bytesperline = 320,
                .colorspace = V4L2_COLORSPACE_SRGB,
                .priv = PB0100_CROP_TO_VGA
        },
        {
                352,                     /* width */
                288,                     /* height */
                V4L2_PIX_FMT_SGRBG8,
                V4L2_FIELD_NONE,
                .sizeimage = 352 * 288,
                .bytesperline = 352,
                .colorspace = V4L2_COLORSPACE_SRGB,
                .priv = 0
        }
};
/*
 * s_ctrl handler for the PB0100 controls.
 *
 * V4L2_CID_AUTOGAIN: apply the autogain setting; when autogain is being
 * switched off, re-apply the manual gain and exposure values held in the
 * clustered controls.
 *
 * NOTE(review): the custom-control case below matches id
 * V4L2_CTRL_CLASS_USER + 0x1001, which pb0100_init_controls() assigns to
 * the "Natural Light Source" control, while "Automatic Gain Target" is
 * registered as + 0x1000 -- this looks like an id mismatch; confirm
 * against the control definitions before relying on it.
 */
static int pb0100_s_ctrl(struct v4l2_ctrl *ctrl)
{
        struct gspca_dev *gspca_dev =
                container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
        struct sd *sd = (struct sd *)gspca_dev;
        struct pb0100_ctrls *ctrls = sd->sensor_priv;
        int err = -EINVAL;

        switch (ctrl->id) {
        case V4L2_CID_AUTOGAIN:
                err = pb0100_set_autogain(gspca_dev, ctrl->val);
                if (err)
                        break;
                if (ctrl->val)
                        break;
                /* Autogain turned off: restore manual gain and exposure. */
                err = pb0100_set_gain(gspca_dev, ctrls->gain->val);
                if (err)
                        break;
                err = pb0100_set_exposure(gspca_dev, ctrls->exposure->val);
                break;
        case V4L2_CTRL_CLASS_USER + 0x1001:
                err = pb0100_set_autogain_target(gspca_dev, ctrl->val);
                break;
        }
        return err;
}
/* Control ops shared by all PB0100 controls. */
static const struct v4l2_ctrl_ops pb0100_ctrl_ops = {
        .s_ctrl = pb0100_s_ctrl,
};
/*
 * Register the PB0100 V4L2 controls: autogain, exposure, gain, red/blue
 * balance, plus two custom controls ("Automatic Gain Target" and
 * "Natural Light Source").  Allocates a pb0100_ctrls and stores it in
 * sd->sensor_priv.  Returns 0 or a negative errno.
 */
static int pb0100_init_controls(struct sd *sd)
{
        struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;
        struct pb0100_ctrls *ctrls;
        static const struct v4l2_ctrl_config autogain_target = {
                .ops = &pb0100_ctrl_ops,
                .id = V4L2_CTRL_CLASS_USER + 0x1000,
                .type = V4L2_CTRL_TYPE_INTEGER,
                .name = "Automatic Gain Target",
                .max = 255,
                .step = 1,
                .def = 128,
        };
        static const struct v4l2_ctrl_config natural_light = {
                .ops = &pb0100_ctrl_ops,
                .id = V4L2_CTRL_CLASS_USER + 0x1001,
                .type = V4L2_CTRL_TYPE_BOOLEAN,
                .name = "Natural Light Source",
                .max = 1,
                .step = 1,
                .def = 1,
        };

        ctrls = kzalloc(sizeof(*ctrls), GFP_KERNEL);
        if (!ctrls)
                return -ENOMEM;

        v4l2_ctrl_handler_init(hdl, 6);
        ctrls->autogain = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
                        V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
        ctrls->exposure = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
                        V4L2_CID_EXPOSURE, 0, 511, 1, 12);
        ctrls->gain = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
                        V4L2_CID_GAIN, 0, 255, 1, 128);
        ctrls->red = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
                        V4L2_CID_RED_BALANCE, -255, 255, 1, 0);
        ctrls->blue = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
                        V4L2_CID_BLUE_BALANCE, -255, 255, 1, 0);
        ctrls->natural = v4l2_ctrl_new_custom(hdl, &natural_light, NULL);
        ctrls->target = v4l2_ctrl_new_custom(hdl, &autogain_target, NULL);
        if (hdl->error) {
                kfree(ctrls);
                return hdl->error;
        }
        sd->sensor_priv = ctrls;
        /* Cluster the first 5 controls behind the autogain switch. */
        v4l2_ctrl_auto_cluster(5, &ctrls->autogain, 0, false);
        return 0;
}
/*
 * Detect the sensor: read PB_IDENT and check the high byte (0x64).
 * On success install the PB0100 mode table.  Returns 0 or -ENODEV.
 */
static int pb0100_probe(struct sd *sd)
{
        u16 sensor;
        int err;

        err = stv06xx_read_sensor(sd, PB_IDENT, &sensor);
        if (err < 0)
                return -ENODEV;

        if ((sensor >> 8) != 0x64)
                return -ENODEV;

        pr_info("Photobit pb0100 sensor detected\n");

        sd->gspca_dev.cam.cam_mode = pb0100_mode;
        sd->gspca_dev.cam.nmodes = ARRAY_SIZE(pb0100_mode);

        return 0;
}
/*
 * Start streaming: choose a sensor row speed based on the available USB
 * bandwidth (max packet size of the current altsetting), program the
 * sensor window (VGA-crop or full 352x288), configure the bridge for
 * subsampled or full scan, then enable the sensor via PB_CONTROL.
 */
static int pb0100_start(struct sd *sd)
{
        int err, packet_size, max_packet_size;
        struct usb_host_interface *alt;
        struct usb_interface *intf;
        struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
        struct cam *cam = &sd->gspca_dev.cam;
        u32 mode = cam->cam_mode[sd->gspca_dev.curr_mode].priv;

        intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
        /* NOTE(review): usb_ifnum_to_if() may return NULL; presumably the
         * gspca core guarantees the interface exists -- confirm, since
         * usb_altnum_to_altsetting() dereferences it. */
        alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
        if (!alt)
                return -ENODEV;
        packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);

        /* If we don't have enough bandwidth use a lower framerate */
        max_packet_size = sd->sensor->max_packet_size[sd->gspca_dev.curr_mode];
        if (packet_size < max_packet_size)
                stv06xx_write_sensor(sd, PB_ROWSPEED, BIT(4)|BIT(3)|BIT(1));
        else
                stv06xx_write_sensor(sd, PB_ROWSPEED, BIT(5)|BIT(3)|BIT(1));

        /* Setup sensor window */
        if (mode & PB0100_CROP_TO_VGA) {
                stv06xx_write_sensor(sd, PB_RSTART, 30);
                stv06xx_write_sensor(sd, PB_CSTART, 20);
                stv06xx_write_sensor(sd, PB_RWSIZE, 240 - 1);
                stv06xx_write_sensor(sd, PB_CWSIZE, 320 - 1);
        } else {
                stv06xx_write_sensor(sd, PB_RSTART, 8);
                stv06xx_write_sensor(sd, PB_CSTART, 4);
                stv06xx_write_sensor(sd, PB_RWSIZE, 288 - 1);
                stv06xx_write_sensor(sd, PB_CWSIZE, 352 - 1);
        }

        if (mode & PB0100_SUBSAMPLE) {
                stv06xx_write_bridge(sd, STV_Y_CTRL, 0x02); /* Wrong, FIXME */
                stv06xx_write_bridge(sd, STV_X_CTRL, 0x06);

                stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x10);
        } else {
                stv06xx_write_bridge(sd, STV_Y_CTRL, 0x01);
                stv06xx_write_bridge(sd, STV_X_CTRL, 0x0a);
                /* larger -> slower */
                stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x20);
        }

        err = stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3)|BIT(1));
        gspca_dbg(gspca_dev, D_STREAM, "Started stream, status: %d\n", err);

        return (err < 0) ? err : 0;
}
/* Stop streaming: abort the current frame and clear the enable bit
 * (bit 1) in PB_CONTROL. */
static int pb0100_stop(struct sd *sd)
{
        struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
        int err;

        err = stv06xx_write_sensor(sd, PB_ABORTFRAME, 1);
        if (err < 0)
                goto out;

        /* Set bit 1 to zero */
        err = stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3));

        gspca_dbg(gspca_dev, D_STREAM, "Halting stream\n");
out:
        return (err < 0) ? err : 0;
}
/* FIXME: Sort the init commands out and put them into tables,
          this is only for getting the camera to work */
/* FIXME: No error handling for now,
          add this once the init has been converted to proper tables */
/*
 * One-time sensor/bridge bring-up: reset the sensor, program gain and
 * auto-exposure defaults, black level, and scan/timing registers.
 * Always returns 0 (see FIXME above about error handling).
 */
static int pb0100_init(struct sd *sd)
{
        stv06xx_write_bridge(sd, STV_REG00, 1);
        stv06xx_write_bridge(sd, STV_SCAN_RATE, 0);

        /* Reset sensor */
        stv06xx_write_sensor(sd, PB_RESET, 1);
        stv06xx_write_sensor(sd, PB_RESET, 0);

        /* Disable chip */
        stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3));

        /* Gain stuff...*/
        stv06xx_write_sensor(sd, PB_PREADCTRL, BIT(12)|BIT(10)|BIT(6));
        stv06xx_write_sensor(sd, PB_ADCGLOBALGAIN, 12);

        /* Set up auto-exposure */
        /* ADC VREF_HI new setting for a transition
          from the Expose1 to the Expose2 setting */
        stv06xx_write_sensor(sd, PB_R28, 12);
        /* gain max for autoexposure */
        stv06xx_write_sensor(sd, PB_ADCMAXGAIN, 180);
        /* gain min for autoexposure  */
        stv06xx_write_sensor(sd, PB_ADCMINGAIN, 12);
        /* Maximum frame integration time (programmed into R8)
           allowed for auto-exposure routine */
        stv06xx_write_sensor(sd, PB_R54, 3);
        /* Minimum frame integration time (programmed into R8)
           allowed for auto-exposure routine */
        stv06xx_write_sensor(sd, PB_R55, 0);
        stv06xx_write_sensor(sd, PB_UPDATEINT, 1);
        /* R15  Expose0 (maximum that auto-exposure may use) */
        stv06xx_write_sensor(sd, PB_R15, 800);
        /* R17  Expose2 (minimum that auto-exposure may use) */
        stv06xx_write_sensor(sd, PB_R17, 10);

        stv06xx_write_sensor(sd, PB_EXPGAIN, 0);

        /* 0x14 */
        stv06xx_write_sensor(sd, PB_VOFFSET, 0);
        /* 0x0D */
        stv06xx_write_sensor(sd, PB_ADCGAINH, 11);
        /* Set black level (important!) */
        stv06xx_write_sensor(sd, PB_ADCGAINL, 0);

        /* ??? */
        stv06xx_write_bridge(sd, STV_REG00, 0x11);
        stv06xx_write_bridge(sd, STV_REG03, 0x45);
        stv06xx_write_bridge(sd, STV_REG04, 0x07);

        /* Scan/timing for the sensor */
        stv06xx_write_sensor(sd, PB_ROWSPEED, BIT(4)|BIT(3)|BIT(1));
        stv06xx_write_sensor(sd, PB_CFILLIN, 14);
        stv06xx_write_sensor(sd, PB_VBL, 0);
        stv06xx_write_sensor(sd, PB_FINTTIME, 0);
        stv06xx_write_sensor(sd, PB_RINTTIME, 123);

        stv06xx_write_bridge(sd, STV_REG01, 0xc2);
        stv06xx_write_bridge(sd, STV_REG02, 0xb0);
        return 0;
}
/* Debug register-dump hook; intentionally a no-op for this sensor. */
static int pb0100_dump(struct sd *sd)
{
        return 0;
}
/*
 * Set the green gain on both green channels (G1/G2), then re-apply the
 * red and blue balance, which are expressed relative to this gain.
 */
static int pb0100_set_gain(struct gspca_dev *gspca_dev, __s32 val)
{
        int err;
        struct sd *sd = (struct sd *) gspca_dev;
        struct pb0100_ctrls *ctrls = sd->sensor_priv;

        err = stv06xx_write_sensor(sd, PB_G1GAIN, val);
        if (!err)
                err = stv06xx_write_sensor(sd, PB_G2GAIN, val);
        gspca_dbg(gspca_dev, D_CONF, "Set green gain to %d, status: %d\n",
                  val, err);

        if (!err)
                err = pb0100_set_red_balance(gspca_dev, ctrls->red->val);
        if (!err)
                err = pb0100_set_blue_balance(gspca_dev, ctrls->blue->val);

        return err;
}
/*
 * Program the red channel gain: the balance offset is applied on top of
 * the current green gain and clamped to the register range 0..255.
 */
static int pb0100_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
{
        struct sd *sd = (struct sd *) gspca_dev;
        struct pb0100_ctrls *ctrls = sd->sensor_priv;
        __s32 gain;
        int ret;

        gain = ctrls->gain->val + val;
        val = gain < 0 ? 0 : (gain > 255 ? 255 : gain);

        ret = stv06xx_write_sensor(sd, PB_RGAIN, val);

        gspca_dbg(gspca_dev, D_CONF, "Set red gain to %d, status: %d\n",
                  val, ret);

        return ret;
}
/*
 * Program the blue channel gain: the balance offset is applied on top of
 * the current green gain and clamped to the register range 0..255.
 */
static int pb0100_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
{
        struct sd *sd = (struct sd *) gspca_dev;
        struct pb0100_ctrls *ctrls = sd->sensor_priv;
        __s32 gain;
        int ret;

        gain = ctrls->gain->val + val;
        val = gain < 0 ? 0 : (gain > 255 ? 255 : gain);

        ret = stv06xx_write_sensor(sd, PB_BGAIN, val);

        gspca_dbg(gspca_dev, D_CONF, "Set blue gain to %d, status: %d\n",
                  val, ret);

        return ret;
}
/* Set the exposure by programming the row integration time register. */
static int pb0100_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
        struct sd *sd = (struct sd *) gspca_dev;
        int err;

        err = stv06xx_write_sensor(sd, PB_RINTTIME, val);
        gspca_dbg(gspca_dev, D_CONF, "Set exposure to %d, status: %d\n",
                  val, err);

        return err;
}
/*
 * Enable/disable the sensor's built-in auto-exposure/gain.  When the
 * "natural light" control is set, an extra mode bit (BIT(6)) is enabled
 * in PB_EXPGAIN; 0 disables autogain entirely.
 */
static int pb0100_set_autogain(struct gspca_dev *gspca_dev, __s32 val)
{
        int err;
        struct sd *sd = (struct sd *) gspca_dev;
        struct pb0100_ctrls *ctrls = sd->sensor_priv;

        if (val) {
                if (ctrls->natural->val)
                        val = BIT(6)|BIT(4)|BIT(0);
                else
                        val = BIT(4)|BIT(0);
        } else
                val = 0;

        err = stv06xx_write_sensor(sd, PB_EXPGAIN, val);
        gspca_dbg(gspca_dev, D_CONF, "Set autogain to %d (natural: %d), status: %d\n",
                  val, ctrls->natural->val, err);

        return err;
}
/*
 * Set the auto-exposure target: translate the 0..255 target level into
 * bright/dark pixel-count thresholds (R21/R22) scaled to the number of
 * pixels the sensor counts while subsampling.
 */
static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val)
{
        int err, totalpixels, brightpixels, darkpixels;
        struct sd *sd = (struct sd *) gspca_dev;

        /* Number of pixels counted by the sensor when subsampling the pixels.
         * Slightly larger than the real value to avoid oscillation */
        totalpixels = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
        totalpixels = totalpixels/(8*8) + totalpixels/(64*64);

        brightpixels = (totalpixels * val) >> 8;
        darkpixels   = totalpixels - brightpixels;
        err = stv06xx_write_sensor(sd, PB_R21, brightpixels);
        if (!err)
                err = stv06xx_write_sensor(sd, PB_R22, darkpixels);

        gspca_dbg(gspca_dev, D_CONF, "Set autogain target to %d, status: %d\n",
                  val, err);

        return err;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3959_1 |
crossvul-cpp_data_good_322_0 | /************************************************************
* Copyright (c) 1994 by Silicon Graphics Computer Systems, Inc.
*
* Permission to use, copy, modify, and distribute this
* software and its documentation for any purpose and without
* fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting
* documentation, and that the name of Silicon Graphics not be
* used in advertising or publicity pertaining to distribution
* of the software without specific prior written permission.
* Silicon Graphics makes no representation about the suitability
* of this software for any purpose. It is provided "as is"
* without any express or implied warranty.
*
* SILICON GRAPHICS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
* SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON
* GRAPHICS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH
* THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
********************************************************/
#include "xkbcomp-priv.h"
#include "text.h"
#include "expr.h"
/* Callback used to resolve an identifier of the given value type to an
 * unsigned value; priv carries lookup-specific data (e.g. a LookupEntry
 * table).  Returns false when the identifier is not recognized. */
typedef bool (*IdentLookupFunc)(struct xkb_context *ctx, const void *priv,
                                xkb_atom_t field, enum expr_value_type type,
                                unsigned int *val_rtrn);
/*
 * Decompose the left-hand side of an assignment into element, field and
 * optional array-index expression.  Depending on the expression form,
 * *elem_rtrn and/or *index_rtrn may be set to NULL.  Returns false on
 * atom-text lookup failure or an unexpected operator.
 */
bool
ExprResolveLhs(struct xkb_context *ctx, const ExprDef *expr,
               const char **elem_rtrn, const char **field_rtrn,
               ExprDef **index_rtrn)
{
    switch (expr->expr.op) {
    case EXPR_IDENT:
        /* Bare identifier: field only. */
        *elem_rtrn = NULL;
        *field_rtrn = xkb_atom_text(ctx, expr->ident.ident);
        *index_rtrn = NULL;
        return (*field_rtrn != NULL);
    case EXPR_FIELD_REF:
        /* element.field */
        *elem_rtrn = xkb_atom_text(ctx, expr->field_ref.element);
        *field_rtrn = xkb_atom_text(ctx, expr->field_ref.field);
        *index_rtrn = NULL;
        return (*elem_rtrn != NULL && *field_rtrn != NULL);
    case EXPR_ARRAY_REF:
        /* [element.]field[index]; element is optional here. */
        *elem_rtrn = xkb_atom_text(ctx, expr->array_ref.element);
        *field_rtrn = xkb_atom_text(ctx, expr->array_ref.field);
        *index_rtrn = expr->array_ref.entry;
        if (expr->array_ref.element != XKB_ATOM_NONE && *elem_rtrn == NULL)
            return false;
        if (*field_rtrn == NULL)
            return false;
        return true;
    default:
        break;
    }
    log_wsgo(ctx, "Unexpected operator %d in ResolveLhs\n", expr->expr.op);
    return false;
}
/*
 * Resolve an identifier against a NULL-terminated LookupEntry table
 * (passed via priv).  Only integer-typed lookups are supported; the
 * comparison against entry names is case-insensitive.
 */
static bool
SimpleLookup(struct xkb_context *ctx, const void *priv, xkb_atom_t field,
             enum expr_value_type type, unsigned int *val_rtrn)
{
    const LookupEntry *cur;
    const char *name;

    if (priv == NULL || field == XKB_ATOM_NONE || type != EXPR_TYPE_INT)
        return false;

    name = xkb_atom_text(ctx, field);
    for (cur = priv; cur != NULL && cur->name != NULL; cur++) {
        if (!istreq(name, cur->name))
            continue;
        *val_rtrn = cur->value;
        return true;
    }

    return false;
}
/* Data passed in the *priv argument for LookupModMask. */
typedef struct {
    const struct xkb_mod_set *mods;  /* known modifiers to search */
    enum mod_type mod_type;          /* restricts which mods may match */
} LookupModMaskPriv;
/*
 * Resolve a modifier name to a single-bit mask.  The special names
 * "all" and "none" map to MOD_REAL_MASK_ALL and 0 respectively; any
 * other name is looked up in the modifier set, restricted by mod_type.
 */
static bool
LookupModMask(struct xkb_context *ctx, const void *priv, xkb_atom_t field,
              enum expr_value_type type, xkb_mod_mask_t *val_rtrn)
{
    const char *str;
    xkb_mod_index_t ndx;
    const LookupModMaskPriv *arg = priv;
    const struct xkb_mod_set *mods = arg->mods;
    enum mod_type mod_type = arg->mod_type;

    if (type != EXPR_TYPE_INT)
        return false;

    str = xkb_atom_text(ctx, field);
    if (!str)
        return false;

    if (istreq(str, "all")) {
        *val_rtrn = MOD_REAL_MASK_ALL;
        return true;
    }

    if (istreq(str, "none")) {
        *val_rtrn = 0;
        return true;
    }

    ndx = XkbModNameToIndex(mods, field, mod_type);
    if (ndx == XKB_MOD_INVALID)
        return false;

    *val_rtrn = (1u << ndx);
    return true;
}
/*
 * Resolve an expression to a boolean.  Accepts boolean constants, the
 * identifiers true/yes/on and false/no/off, and the !/~ operators
 * (which invert the child's value).  Arithmetic operators on booleans
 * are rejected with an error.
 */
bool
ExprResolveBoolean(struct xkb_context *ctx, const ExprDef *expr,
                   bool *set_rtrn)
{
    bool ok = false;
    const char *ident;

    switch (expr->expr.op) {
    case EXPR_VALUE:
        if (expr->expr.value_type != EXPR_TYPE_BOOLEAN) {
            log_err(ctx,
                    "Found constant of type %s where boolean was expected\n",
                    expr_value_type_to_string(expr->expr.value_type));
            return false;
        }
        *set_rtrn = expr->boolean.set;
        return true;

    case EXPR_IDENT:
        ident = xkb_atom_text(ctx, expr->ident.ident);
        if (ident) {
            if (istreq(ident, "true") ||
                istreq(ident, "yes") ||
                istreq(ident, "on")) {
                *set_rtrn = true;
                return true;
            }
            else if (istreq(ident, "false") ||
                     istreq(ident, "no") ||
                     istreq(ident, "off")) {
                *set_rtrn = false;
                return true;
            }
        }
        /*
         * Bug fix: ident may be NULL here (failed atom lookup); passing
         * NULL for %s is undefined behavior, so substitute a placeholder.
         */
        log_err(ctx, "Identifier \"%s\" of type boolean is unknown\n",
                ident ? ident : "(null)");
        return false;

    case EXPR_FIELD_REF:
        log_err(ctx, "Default \"%s.%s\" of type boolean is unknown\n",
                xkb_atom_text(ctx, expr->field_ref.element),
                xkb_atom_text(ctx, expr->field_ref.field));
        return false;

    case EXPR_INVERT:
    case EXPR_NOT:
        /* Both invert operators negate the child's boolean value. */
        ok = ExprResolveBoolean(ctx, expr->unary.child, set_rtrn);
        if (ok)
            *set_rtrn = !*set_rtrn;
        return ok;

    case EXPR_ADD:
    case EXPR_SUBTRACT:
    case EXPR_MULTIPLY:
    case EXPR_DIVIDE:
    case EXPR_ASSIGN:
    case EXPR_NEGATE:
    case EXPR_UNARY_PLUS:
        log_err(ctx, "%s of boolean values not permitted\n",
                expr_op_type_to_string(expr->expr.op));
        break;

    default:
        log_wsgo(ctx, "Unknown operator %d in ResolveBoolean\n",
                 expr->expr.op);
        break;
    }

    return false;
}
/*
 * Resolve an expression to a keycode.  Supports integer constants, the
 * four arithmetic operators (with an explicit divide-by-zero check),
 * bitwise complement and unary plus.
 */
bool
ExprResolveKeyCode(struct xkb_context *ctx, const ExprDef *expr,
                   xkb_keycode_t *kc)
{
    xkb_keycode_t leftRtrn, rightRtrn;

    switch (expr->expr.op) {
    case EXPR_VALUE:
        if (expr->expr.value_type != EXPR_TYPE_INT) {
            log_err(ctx,
                    "Found constant of type %s where an int was expected\n",
                    expr_value_type_to_string(expr->expr.value_type));
            return false;
        }

        *kc = (xkb_keycode_t) expr->integer.ival;
        return true;

    case EXPR_ADD:
    case EXPR_SUBTRACT:
    case EXPR_MULTIPLY:
    case EXPR_DIVIDE:
        /* Resolve both operands first, then apply the operator. */
        if (!ExprResolveKeyCode(ctx, expr->binary.left, &leftRtrn) ||
            !ExprResolveKeyCode(ctx, expr->binary.right, &rightRtrn))
            return false;

        switch (expr->expr.op) {
        case EXPR_ADD:
            *kc = leftRtrn + rightRtrn;
            break;
        case EXPR_SUBTRACT:
            *kc = leftRtrn - rightRtrn;
            break;
        case EXPR_MULTIPLY:
            *kc = leftRtrn * rightRtrn;
            break;
        case EXPR_DIVIDE:
            if (rightRtrn == 0) {
                log_err(ctx, "Cannot divide by zero: %d / %d\n",
                        leftRtrn, rightRtrn);
                return false;
            }

            *kc = leftRtrn / rightRtrn;
            break;
        default:
            break;
        }

        return true;

    case EXPR_NEGATE:
        /* Note: "negate" of a keycode is bitwise complement here. */
        if (!ExprResolveKeyCode(ctx, expr->unary.child, &leftRtrn))
            return false;

        *kc = ~leftRtrn;
        return true;

    case EXPR_UNARY_PLUS:
        return ExprResolveKeyCode(ctx, expr->unary.child, kc);

    default:
        log_wsgo(ctx, "Unknown operator %d in ResolveKeyCode\n",
                 expr->expr.op);
        break;
    }

    return false;
}
/**
 * This function returns ... something.  It's a bit of a guess, really.
 *
 * If an integer is given in value ctx, it will be returned in ival.
 * If an ident or field reference is given, the lookup function (if given)
 * will be called.  At the moment, only SimpleLookup use this, and they both
 * return the results in uval.  And don't support field references.
 *
 * Cool.
 */
static bool
ExprResolveIntegerLookup(struct xkb_context *ctx, const ExprDef *expr,
                         int *val_rtrn, IdentLookupFunc lookup,
                         const void *lookupPriv)
{
    bool ok = false;
    int l, r;
    unsigned u;
    ExprDef *left, *right;

    switch (expr->expr.op) {
    case EXPR_VALUE:
        if (expr->expr.value_type != EXPR_TYPE_INT) {
            log_err(ctx,
                    "Found constant of type %s where an int was expected\n",
                    expr_value_type_to_string(expr->expr.value_type));
            return false;
        }

        *val_rtrn = expr->integer.ival;
        return true;

    case EXPR_IDENT:
        /* Identifiers go through the optional lookup callback. */
        if (lookup)
            ok = lookup(ctx, lookupPriv, expr->ident.ident, EXPR_TYPE_INT, &u);

        if (!ok)
            log_err(ctx, "Identifier \"%s\" of type int is unknown\n",
                    xkb_atom_text(ctx, expr->ident.ident));
        else
            *val_rtrn = (int) u;

        return ok;

    case EXPR_FIELD_REF:
        /* Field references are not supported for integers. */
        log_err(ctx, "Default \"%s.%s\" of type int is unknown\n",
                xkb_atom_text(ctx, expr->field_ref.element),
                xkb_atom_text(ctx, expr->field_ref.field));
        return false;

    case EXPR_ADD:
    case EXPR_SUBTRACT:
    case EXPR_MULTIPLY:
    case EXPR_DIVIDE:
        left = expr->binary.left;
        right = expr->binary.right;
        if (!ExprResolveIntegerLookup(ctx, left, &l, lookup, lookupPriv) ||
            !ExprResolveIntegerLookup(ctx, right, &r, lookup, lookupPriv))
            return false;

        switch (expr->expr.op) {
        case EXPR_ADD:
            *val_rtrn = l + r;
            break;
        case EXPR_SUBTRACT:
            *val_rtrn = l - r;
            break;
        case EXPR_MULTIPLY:
            *val_rtrn = l * r;
            break;
        case EXPR_DIVIDE:
            if (r == 0) {
                log_err(ctx, "Cannot divide by zero: %d / %d\n", l, r);
                return false;
            }

            *val_rtrn = l / r;
            break;
        default:
            log_err(ctx, "%s of integers not permitted\n",
                    expr_op_type_to_string(expr->expr.op));
            return false;
        }

        return true;

    case EXPR_ASSIGN:
        log_wsgo(ctx, "Assignment operator not implemented yet\n");
        break;

    case EXPR_NOT:
        log_err(ctx, "The ! operator cannot be applied to an integer\n");
        return false;

    case EXPR_INVERT:
    case EXPR_NEGATE:
        left = expr->unary.child;
        if (!ExprResolveIntegerLookup(ctx, left, &l, lookup, lookupPriv))
            return false;

        /* EXPR_NEGATE -> arithmetic negation, EXPR_INVERT -> complement. */
        *val_rtrn = (expr->expr.op == EXPR_NEGATE ? -l : ~l);
        return true;

    case EXPR_UNARY_PLUS:
        left = expr->unary.child;
        return ExprResolveIntegerLookup(ctx, left, val_rtrn, lookup,
                                        lookupPriv);

    default:
        log_wsgo(ctx, "Unknown operator %d in ResolveInteger\n",
                 expr->expr.op);
        break;
    }

    return false;
}
/* Resolve an expression to a plain integer (no identifier lookup). */
bool
ExprResolveInteger(struct xkb_context *ctx, const ExprDef *expr,
                   int *val_rtrn)
{
    return ExprResolveIntegerLookup(ctx, expr, val_rtrn, NULL, NULL);
}
/*
 * Resolve an expression to a 1-based group (layout) index, accepting
 * the named groups in groupNames.  Valid range is 1..XKB_MAX_GROUPS.
 */
bool
ExprResolveGroup(struct xkb_context *ctx, const ExprDef *expr,
                 xkb_layout_index_t *group_rtrn)
{
    bool ok;
    int result;

    ok = ExprResolveIntegerLookup(ctx, expr, &result, SimpleLookup,
                                  groupNames);
    if (!ok)
        return false;

    if (result <= 0 || result > XKB_MAX_GROUPS) {
        /*
         * Bug fix: result is a signed int; %u would misprint negative
         * values, so use %d.
         */
        log_err(ctx, "Group index %d is out of range (1..%d)\n",
                result, XKB_MAX_GROUPS);
        return false;
    }

    *group_rtrn = (xkb_layout_index_t) result;
    return true;
}
/*
 * Resolve an expression to a shift level.  The source uses 1-based
 * levels (levelNames); the result is converted to a 0-based index.
 */
bool
ExprResolveLevel(struct xkb_context *ctx, const ExprDef *expr,
                 xkb_level_index_t *level_rtrn)
{
    bool ok;
    int result;

    ok = ExprResolveIntegerLookup(ctx, expr, &result, SimpleLookup,
                                  levelNames);
    if (!ok)
        return false;

    if (result < 1) {
        log_err(ctx, "Shift level %d is out of range\n", result);
        return false;
    }

    /* Level is zero-indexed from now on. */
    *level_rtrn = (unsigned int) (result - 1);
    return true;
}
/* Resolve an expression to a pointer-button number, accepting the
 * symbolic names in buttonNames. */
bool
ExprResolveButton(struct xkb_context *ctx, const ExprDef *expr, int *btn_rtrn)
{
    return ExprResolveIntegerLookup(ctx, expr, btn_rtrn, SimpleLookup,
                                    buttonNames);
}
/*
 * Resolve an expression to a string atom.  Only string constants are
 * accepted; identifiers, field references and all operators produce an
 * error.
 */
bool
ExprResolveString(struct xkb_context *ctx, const ExprDef *expr,
                  xkb_atom_t *val_rtrn)
{
    switch (expr->expr.op) {
    case EXPR_VALUE:
        if (expr->expr.value_type != EXPR_TYPE_STRING) {
            log_err(ctx, "Found constant of type %s, expected a string\n",
                    expr_value_type_to_string(expr->expr.value_type));
            return false;
        }

        *val_rtrn = expr->string.str;
        return true;

    case EXPR_IDENT:
        log_err(ctx, "Identifier \"%s\" of type string not found\n",
                xkb_atom_text(ctx, expr->ident.ident));
        return false;

    case EXPR_FIELD_REF:
        log_err(ctx, "Default \"%s.%s\" of type string not found\n",
                xkb_atom_text(ctx, expr->field_ref.element),
                xkb_atom_text(ctx, expr->field_ref.field));
        return false;

    case EXPR_ADD:
    case EXPR_SUBTRACT:
    case EXPR_MULTIPLY:
    case EXPR_DIVIDE:
    case EXPR_ASSIGN:
    case EXPR_NEGATE:
    case EXPR_INVERT:
    case EXPR_NOT:
    case EXPR_UNARY_PLUS:
        log_err(ctx, "%s of strings not permitted\n",
                expr_op_type_to_string(expr->expr.op));
        return false;

    default:
        log_wsgo(ctx, "Unknown operator %d in ResolveString\n",
                 expr->expr.op);
        break;
    }
    return false;
}
/*
 * Resolve an identifier expression against a table of enumerated
 * values; on failure, log the full set of accepted names.
 */
bool
ExprResolveEnum(struct xkb_context *ctx, const ExprDef *expr,
                unsigned int *val_rtrn, const LookupEntry *values)
{
    if (expr->expr.op != EXPR_IDENT) {
        log_err(ctx, "Found a %s where an enumerated value was expected\n",
                expr_op_type_to_string(expr->expr.op));
        return false;
    }

    if (SimpleLookup(ctx, values, expr->ident.ident, EXPR_TYPE_INT,
                     val_rtrn))
        return true;

    log_err(ctx, "Illegal identifier %s; expected one of:\n",
            xkb_atom_text(ctx, expr->ident.ident));
    for (const LookupEntry *v = values; v && v->name; v++)
        log_err(ctx, "\t%s\n", v->name);

    return false;
}
/*
 * Resolve an expression to a bit mask.  "+" unions masks, "-" clears
 * bits; multiplication/division of masks is rejected.  Identifiers are
 * resolved through the lookup callback.
 */
static bool
ExprResolveMaskLookup(struct xkb_context *ctx, const ExprDef *expr,
                      unsigned int *val_rtrn, IdentLookupFunc lookup,
                      const void *lookupPriv)
{
    bool ok = false;
    unsigned int l = 0, r = 0;
    int v;
    ExprDef *left, *right;
    const char *bogus = NULL;

    switch (expr->expr.op) {
    case EXPR_VALUE:
        if (expr->expr.value_type != EXPR_TYPE_INT) {
            log_err(ctx,
                    "Found constant of type %s where a mask was expected\n",
                    expr_value_type_to_string(expr->expr.value_type));
            return false;
        }

        *val_rtrn = (unsigned int) expr->integer.ival;
        return true;

    case EXPR_IDENT:
        ok = lookup(ctx, lookupPriv, expr->ident.ident, EXPR_TYPE_INT,
                    val_rtrn);
        if (!ok)
            log_err(ctx, "Identifier \"%s\" of type int is unknown\n",
                    xkb_atom_text(ctx, expr->ident.ident));
        return ok;

    case EXPR_FIELD_REF:
        log_err(ctx, "Default \"%s.%s\" of type int is unknown\n",
                xkb_atom_text(ctx, expr->field_ref.element),
                xkb_atom_text(ctx, expr->field_ref.field));
        return false;

    case EXPR_ARRAY_REF:
        bogus = "array reference";
        /* fallthrough */
    case EXPR_ACTION_DECL:
        if (bogus == NULL)
            bogus = "function use";
        log_err(ctx,
                "Unexpected %s in mask expression; Expression Ignored\n",
                bogus);
        return false;

    case EXPR_ADD:
    case EXPR_SUBTRACT:
    case EXPR_MULTIPLY:
    case EXPR_DIVIDE:
        left = expr->binary.left;
        right = expr->binary.right;
        if (!ExprResolveMaskLookup(ctx, left, &l, lookup, lookupPriv) ||
            !ExprResolveMaskLookup(ctx, right, &r, lookup, lookupPriv))
            return false;

        switch (expr->expr.op) {
        case EXPR_ADD:
            /* "+" on masks is a union. */
            *val_rtrn = l | r;
            break;
        case EXPR_SUBTRACT:
            /* "-" on masks clears bits. */
            *val_rtrn = l & (~r);
            break;
        case EXPR_MULTIPLY:
        case EXPR_DIVIDE:
            log_err(ctx, "Cannot %s masks; Illegal operation ignored\n",
                    (expr->expr.op == EXPR_DIVIDE ? "divide" : "multiply"));
            return false;
        default:
            break;
        }

        return true;

    case EXPR_ASSIGN:
        log_wsgo(ctx, "Assignment operator not implemented yet\n");
        break;

    case EXPR_INVERT:
        left = expr->unary.child;
        if (!ExprResolveIntegerLookup(ctx, left, &v, lookup, lookupPriv))
            return false;

        *val_rtrn = ~v;
        return true;

    case EXPR_UNARY_PLUS:
    case EXPR_NEGATE:
    case EXPR_NOT:
        /* These operators are not permitted on masks; always fails.
         * NOTE(review): the diagnostic below is only emitted when the
         * child itself fails to resolve -- presumably it was meant to be
         * unconditional; behavior kept as-is. */
        left = expr->unary.child;
        if (!ExprResolveIntegerLookup(ctx, left, &v, lookup, lookupPriv))
            log_err(ctx, "The %s operator cannot be used with a mask\n",
                    (expr->expr.op == EXPR_NEGATE ? "-" : "!"));
        return false;

    default:
        log_wsgo(ctx, "Unknown operator %d in ResolveMask\n",
                 expr->expr.op);
        break;
    }

    return false;
}
/* Resolve an expression to a mask using a simple name/value table. */
bool
ExprResolveMask(struct xkb_context *ctx, const ExprDef *expr,
                unsigned int *mask_rtrn, const LookupEntry *values)
{
    return ExprResolveMaskLookup(ctx, expr, mask_rtrn, SimpleLookup, values);
}
/* Resolve an expression to a modifier mask, matching modifier names of
 * the given mod_type against the supplied modifier set. */
bool
ExprResolveModMask(struct xkb_context *ctx, const ExprDef *expr,
                   enum mod_type mod_type, const struct xkb_mod_set *mods,
                   xkb_mod_mask_t *mask_rtrn)
{
    LookupModMaskPriv priv = { .mods = mods, .mod_type = mod_type };
    return ExprResolveMaskLookup(ctx, expr, mask_rtrn, LookupModMask, &priv);
}
/*
 * Resolve an expression to a keysym.  An identifier is first tried as a
 * keysym name; failing that, an integer in 0..9 is mapped to
 * XKB_KEY_0..XKB_KEY_9.  Anything else fails.
 */
bool
ExprResolveKeySym(struct xkb_context *ctx, const ExprDef *expr,
                  xkb_keysym_t *sym_rtrn)
{
    int val;

    if (expr->expr.op == EXPR_IDENT) {
        const char *str = xkb_atom_text(ctx, expr->ident.ident);
        *sym_rtrn = xkb_keysym_from_name(str, 0);
        if (*sym_rtrn != XKB_KEY_NoSymbol)
            return true;
    }

    if (!ExprResolveInteger(ctx, expr, &val))
        return false;

    if (val < 0 || val >= 10)
        return false;

    *sym_rtrn = XKB_KEY_0 + (xkb_keysym_t) val;
    return true;
}
/*
 * Resolve an identifier expression to a modifier index of the given
 * type.  The modifier must already be declared in the supplied set;
 * non-identifier expressions and unknown names are rejected with an
 * error.
 */
bool
ExprResolveMod(struct xkb_context *ctx, const ExprDef *def,
               enum mod_type mod_type, const struct xkb_mod_set *mods,
               xkb_mod_index_t *ndx_rtrn)
{
    xkb_mod_index_t ndx;
    xkb_atom_t name;

    if (def->expr.op != EXPR_IDENT) {
        log_err(ctx,
                "Cannot resolve virtual modifier: "
                "found %s where a virtual modifier name was expected\n",
                expr_op_type_to_string(def->expr.op));
        return false;
    }

    name = def->ident.ident;
    ndx = XkbModNameToIndex(mods, name, mod_type);
    if (ndx == XKB_MOD_INVALID) {
        log_err(ctx,
                "Cannot resolve virtual modifier: "
                "\"%s\" was not previously declared\n",
                xkb_atom_text(ctx, name));
        return false;
    }

    *ndx_rtrn = ndx;
    return true;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_322_0 |
crossvul-cpp_data_good_3959_1 | // SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher
* Mark Cave-Ayland, Carlo E Prelz, Dick Streefland
* Copyright (c) 2002, 2003 Tuukka Toivonen
* Copyright (c) 2008 Erik Andrén
*
* P/N 861037: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0010: Sensor HDCS1000 ASIC STV0600
* P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express
* P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam
* P/N 861075-0040: Sensor HDCS1000 ASIC
* P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB
* P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web
*/
/*
* The spec file for the PB-0100 suggests the following for best quality
* images after the sensor has been reset :
*
* PB_ADCGAINL = R60 = 0x03 (3 dec) : sets low reference of ADC
to produce good black level
* PB_PREADCTRL = R32 = 0x1400 (5120 dec) : Enables global gain changes
through R53
* PB_ADCMINGAIN = R52 = 0x10 (16 dec) : Sets the minimum gain for
auto-exposure
* PB_ADCGLOBALGAIN = R53 = 0x10 (16 dec) : Sets the global gain
* PB_EXPGAIN = R14 = 0x11 (17 dec) : Sets the auto-exposure value
* PB_UPDATEINT = R23 = 0x02 (2 dec) : Sets the speed on
auto-exposure routine
* PB_CFILLIN = R5 = 0x0E (14 dec) : Sets the frame rate
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "stv06xx_pb0100.h"
/* Per-sensor control handles, stored in sd->sensor_priv.  The anonymous
 * struct groups the clustered controls; "target" stands alone. */
struct pb0100_ctrls {
        struct { /* one big happy control cluster... */
                struct v4l2_ctrl *autogain;
                struct v4l2_ctrl *gain;
                struct v4l2_ctrl *exposure;
                struct v4l2_ctrl *red;
                struct v4l2_ctrl *blue;
                struct v4l2_ctrl *natural;
        };
        struct v4l2_ctrl *target;
};
/* Supported capture modes.  The first four initializers are positional:
 * width, height, pixelformat, field. */
static struct v4l2_pix_format pb0100_mode[] = {
/* low res / subsample modes disabled as they are only half res horizontal,
   halving the vertical resolution does not seem to work */
	{
		320,
		240,
		V4L2_PIX_FMT_SGRBG8,
		V4L2_FIELD_NONE,
		.sizeimage = 320 * 240,
		.bytesperline = 320,
		.colorspace = V4L2_COLORSPACE_SRGB,
		/* pb0100_start() crops the full sensor window down to 320x240 */
		.priv = PB0100_CROP_TO_VGA
	},
	{
		352,
		288,
		V4L2_PIX_FMT_SGRBG8,
		V4L2_FIELD_NONE,
		.sizeimage = 352 * 288,
		.bytesperline = 352,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 0
	}
};
/* v4l2 set-control handler.  For the autogain cluster this is invoked with
 * the cluster master (V4L2_CID_AUTOGAIN); when autogain is switched off the
 * cached manual gain/exposure values are re-applied to the sensor. */
static int pb0100_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct gspca_dev *gspca_dev =
		container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
	struct sd *sd = (struct sd *)gspca_dev;
	struct pb0100_ctrls *ctrls = sd->sensor_priv;
	int err = -EINVAL;	/* unhandled control ids fail */

	switch (ctrl->id) {
	case V4L2_CID_AUTOGAIN:
		err = pb0100_set_autogain(gspca_dev, ctrl->val);
		if (err)
			break;
		if (ctrl->val)
			break;
		/* autogain just turned off: restore manual settings */
		err = pb0100_set_gain(gspca_dev, ctrls->gain->val);
		if (err)
			break;
		err = pb0100_set_exposure(gspca_dev, ctrls->exposure->val);
		break;
	case V4L2_CTRL_CLASS_USER + 0x1001:
		/* NOTE(review): 0x1001 is the id of the "Natural Light Source"
		 * config while "Automatic Gain Target" is registered with
		 * 0x1000 (see pb0100_init_controls), so the target control
		 * appears to fall through to -EINVAL — confirm intent. */
		err = pb0100_set_autogain_target(gspca_dev, ctrl->val);
		break;
	}
	return err;
}
/* Only s_ctrl is implemented; all other control ops use framework defaults. */
static const struct v4l2_ctrl_ops pb0100_ctrl_ops = {
	.s_ctrl = pb0100_s_ctrl,
};
/* Register all v4l2 controls for this sensor.
 * Returns 0 on success or a negative errno; on failure the private control
 * struct is freed and sd->sensor_priv is left unset. */
static int pb0100_init_controls(struct sd *sd)
{
	struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;
	struct pb0100_ctrls *ctrls;
	/* custom control: brightness level the auto-exposure engine aims for */
	static const struct v4l2_ctrl_config autogain_target = {
		.ops = &pb0100_ctrl_ops,
		.id = V4L2_CTRL_CLASS_USER + 0x1000,
		.type = V4L2_CTRL_TYPE_INTEGER,
		.name = "Automatic Gain Target",
		.max = 255,
		.step = 1,
		.def = 128,
	};
	/* custom control: natural light source, consumed by
	 * pb0100_set_autogain() when enabling autogain */
	static const struct v4l2_ctrl_config natural_light = {
		.ops = &pb0100_ctrl_ops,
		.id = V4L2_CTRL_CLASS_USER + 0x1001,
		.type = V4L2_CTRL_TYPE_BOOLEAN,
		.name = "Natural Light Source",
		.max = 1,
		.step = 1,
		.def = 1,
	};

	ctrls = kzalloc(sizeof(*ctrls), GFP_KERNEL);
	if (!ctrls)
		return -ENOMEM;

	v4l2_ctrl_handler_init(hdl, 6);
	ctrls->autogain = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
			V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	ctrls->exposure = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
			V4L2_CID_EXPOSURE, 0, 511, 1, 12);
	ctrls->gain = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
			V4L2_CID_GAIN, 0, 255, 1, 128);
	ctrls->red = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
			V4L2_CID_RED_BALANCE, -255, 255, 1, 0);
	ctrls->blue = v4l2_ctrl_new_std(hdl, &pb0100_ctrl_ops,
			V4L2_CID_BLUE_BALANCE, -255, 255, 1, 0);
	ctrls->natural = v4l2_ctrl_new_custom(hdl, &natural_light, NULL);
	ctrls->target = v4l2_ctrl_new_custom(hdl, &autogain_target, NULL);
	if (hdl->error) {
		kfree(ctrls);
		return hdl->error;
	}
	sd->sensor_priv = ctrls;
	/* cluster autogain..blue: manual members active while autogain == 0 */
	v4l2_ctrl_auto_cluster(5, &ctrls->autogain, 0, false);
	return 0;
}
/* Detect a Photobit PB-0100 by reading its ident register; on a match,
 * install the supported capture modes.  Returns 0 or -ENODEV. */
static int pb0100_probe(struct sd *sd)
{
	u16 ident;
	int ret;

	ret = stv06xx_read_sensor(sd, PB_IDENT, &ident);
	if (ret < 0)
		return -ENODEV;

	/* the high byte of the ident register identifies the chip */
	if ((ident >> 8) != 0x64)
		return -ENODEV;

	pr_info("Photobit pb0100 sensor detected\n");

	sd->gspca_dev.cam.cam_mode = pb0100_mode;
	sd->gspca_dev.cam.nmodes = ARRAY_SIZE(pb0100_mode);
	return 0;
}
/* Program bridge + sensor for the currently selected mode and start
 * streaming.  Returns 0 or a negative error.  Only the final control-
 * register write is checked; the window/timing writes are fire-and-forget. */
static int pb0100_start(struct sd *sd)
{
	int err, packet_size, max_packet_size;
	struct usb_host_interface *alt;
	struct usb_interface *intf;
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	struct cam *cam = &sd->gspca_dev.cam;
	u32 mode = cam->cam_mode[sd->gspca_dev.curr_mode].priv;

	intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
	alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
	/* guard against malformed descriptors: missing altsetting or an
	 * altsetting without any endpoint */
	if (!alt)
		return -ENODEV;
	if (alt->desc.bNumEndpoints < 1)
		return -ENODEV;
	packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);

	/* If we don't have enough bandwidth use a lower framerate */
	max_packet_size = sd->sensor->max_packet_size[sd->gspca_dev.curr_mode];
	if (packet_size < max_packet_size)
		stv06xx_write_sensor(sd, PB_ROWSPEED, BIT(4)|BIT(3)|BIT(1));
	else
		stv06xx_write_sensor(sd, PB_ROWSPEED, BIT(5)|BIT(3)|BIT(1));

	/* Setup sensor window */
	if (mode & PB0100_CROP_TO_VGA) {
		stv06xx_write_sensor(sd, PB_RSTART, 30);
		stv06xx_write_sensor(sd, PB_CSTART, 20);
		stv06xx_write_sensor(sd, PB_RWSIZE, 240 - 1);
		stv06xx_write_sensor(sd, PB_CWSIZE, 320 - 1);
	} else {
		stv06xx_write_sensor(sd, PB_RSTART, 8);
		stv06xx_write_sensor(sd, PB_CSTART, 4);
		stv06xx_write_sensor(sd, PB_RWSIZE, 288 - 1);
		stv06xx_write_sensor(sd, PB_CWSIZE, 352 - 1);
	}

	if (mode & PB0100_SUBSAMPLE) {
		stv06xx_write_bridge(sd, STV_Y_CTRL, 0x02); /* Wrong, FIXME */
		stv06xx_write_bridge(sd, STV_X_CTRL, 0x06);

		stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x10);
	} else {
		stv06xx_write_bridge(sd, STV_Y_CTRL, 0x01);
		stv06xx_write_bridge(sd, STV_X_CTRL, 0x0a);
		/* larger -> slower */
		stv06xx_write_bridge(sd, STV_SCAN_RATE, 0x20);
	}

	err = stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3)|BIT(1));
	gspca_dbg(gspca_dev, D_STREAM, "Started stream, status: %d\n", err);

	return (err < 0) ? err : 0;
}
/* Stop streaming: abort the current frame, then clear the run bit in the
 * sensor control register.  Returns 0 or a negative error. */
static int pb0100_stop(struct sd *sd)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int ret;

	ret = stv06xx_write_sensor(sd, PB_ABORTFRAME, 1);
	if (ret < 0)
		return ret;

	/* Set bit 1 to zero */
	ret = stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3));
	gspca_dbg(gspca_dev, D_STREAM, "Halting stream\n");

	return (ret < 0) ? ret : 0;
}
/* FIXME: Sort the init commands out and put them into tables,
this is only for getting the camera to work */
/* FIXME: No error handling for now,
add this once the init has been converted to proper tables */
/* One-time sensor bring-up: reset, auto-exposure engine configuration and
 * scan/timing registers.  Register writes are intentionally unchecked for
 * now (see the FIXMEs above); always returns 0.  Do not reorder the writes
 * without consulting the PB-0100 spec notes at the top of this file. */
static int pb0100_init(struct sd *sd)
{
	stv06xx_write_bridge(sd, STV_REG00, 1);
	stv06xx_write_bridge(sd, STV_SCAN_RATE, 0);

	/* Reset sensor */
	stv06xx_write_sensor(sd, PB_RESET, 1);
	stv06xx_write_sensor(sd, PB_RESET, 0);

	/* Disable chip */
	stv06xx_write_sensor(sd, PB_CONTROL, BIT(5)|BIT(3));

	/* Gain stuff...*/
	stv06xx_write_sensor(sd, PB_PREADCTRL, BIT(12)|BIT(10)|BIT(6));
	stv06xx_write_sensor(sd, PB_ADCGLOBALGAIN, 12);

	/* Set up auto-exposure */
	/* ADC VREF_HI new setting for a transition
	   from the Expose1 to the Expose2 setting */
	stv06xx_write_sensor(sd, PB_R28, 12);
	/* gain max for autoexposure */
	stv06xx_write_sensor(sd, PB_ADCMAXGAIN, 180);
	/* gain min for autoexposure */
	stv06xx_write_sensor(sd, PB_ADCMINGAIN, 12);
	/* Maximum frame integration time (programmed into R8)
	   allowed for auto-exposure routine */
	stv06xx_write_sensor(sd, PB_R54, 3);
	/* Minimum frame integration time (programmed into R8)
	   allowed for auto-exposure routine */
	stv06xx_write_sensor(sd, PB_R55, 0);
	stv06xx_write_sensor(sd, PB_UPDATEINT, 1);
	/* R15 Expose0 (maximum that auto-exposure may use) */
	stv06xx_write_sensor(sd, PB_R15, 800);
	/* R17 Expose2 (minimum that auto-exposure may use) */
	stv06xx_write_sensor(sd, PB_R17, 10);

	stv06xx_write_sensor(sd, PB_EXPGAIN, 0);

	/* 0x14 */
	stv06xx_write_sensor(sd, PB_VOFFSET, 0);
	/* 0x0D */
	stv06xx_write_sensor(sd, PB_ADCGAINH, 11);
	/* Set black level (important!) */
	stv06xx_write_sensor(sd, PB_ADCGAINL, 0);

	/* ??? */
	stv06xx_write_bridge(sd, STV_REG00, 0x11);
	stv06xx_write_bridge(sd, STV_REG03, 0x45);
	stv06xx_write_bridge(sd, STV_REG04, 0x07);

	/* Scan/timing for the sensor */
	stv06xx_write_sensor(sd, PB_ROWSPEED, BIT(4)|BIT(3)|BIT(1));
	stv06xx_write_sensor(sd, PB_CFILLIN, 14);
	stv06xx_write_sensor(sd, PB_VBL, 0);
	stv06xx_write_sensor(sd, PB_FINTTIME, 0);
	stv06xx_write_sensor(sd, PB_RINTTIME, 123);

	stv06xx_write_bridge(sd, STV_REG01, 0xc2);
	stv06xx_write_bridge(sd, STV_REG02, 0xb0);
	return 0;
}
/* Register-dump hook; intentionally a no-op for this sensor. */
static int pb0100_dump(struct sd *sd)
{
	return 0;
}
/* Program both green channels with the requested gain, then refresh the
 * red/blue balance registers (they are offsets from the green gain). */
static int pb0100_set_gain(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct pb0100_ctrls *ctrls = sd->sensor_priv;
	int ret;

	ret = stv06xx_write_sensor(sd, PB_G1GAIN, val);
	if (ret == 0)
		ret = stv06xx_write_sensor(sd, PB_G2GAIN, val);
	gspca_dbg(gspca_dev, D_CONF, "Set green gain to %d, status: %d\n",
		  val, ret);

	if (ret == 0)
		ret = pb0100_set_red_balance(gspca_dev, ctrls->red->val);
	if (ret == 0)
		ret = pb0100_set_blue_balance(gspca_dev, ctrls->blue->val);

	return ret;
}
/* Write the red gain register: the balance value is an offset from the
 * current green gain, clamped to the 8-bit register range. */
static int pb0100_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct pb0100_ctrls *ctrls = sd->sensor_priv;
	__s32 gain = ctrls->gain->val + val;
	int ret;

	if (gain < 0)
		gain = 0;
	if (gain > 255)
		gain = 255;

	ret = stv06xx_write_sensor(sd, PB_RGAIN, gain);
	gspca_dbg(gspca_dev, D_CONF, "Set red gain to %d, status: %d\n",
		  gain, ret);
	return ret;
}
/* Write the blue gain register: the balance value is an offset from the
 * current green gain, clamped to the 8-bit register range. */
static int pb0100_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct pb0100_ctrls *ctrls = sd->sensor_priv;
	__s32 gain = ctrls->gain->val + val;
	int ret;

	if (gain < 0)
		gain = 0;
	if (gain > 255)
		gain = 255;

	ret = stv06xx_write_sensor(sd, PB_BGAIN, gain);
	gspca_dbg(gspca_dev, D_CONF, "Set blue gain to %d, status: %d\n",
		  gain, ret);
	return ret;
}
/* Program the row integration time register with the requested exposure. */
static int pb0100_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int ret = stv06xx_write_sensor(sd, PB_RINTTIME, val);

	gspca_dbg(gspca_dev, D_CONF, "Set exposure to %d, status: %d\n",
		  val, ret);
	return ret;
}
/* Enable/disable the sensor's automatic exposure/gain engine.  When the
 * "Natural Light Source" control is set an additional mode bit is used. */
static int pb0100_set_autogain(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct pb0100_ctrls *ctrls = sd->sensor_priv;
	__s32 reg = 0;
	int ret;

	if (val) {
		reg = BIT(4) | BIT(0);
		if (ctrls->natural->val)
			reg |= BIT(6);
	}

	ret = stv06xx_write_sensor(sd, PB_EXPGAIN, reg);
	gspca_dbg(gspca_dev, D_CONF, "Set autogain to %d (natural: %d), status: %d\n",
		  reg, ctrls->natural->val, ret);
	return ret;
}
/* Program the auto-exposure engine's bright/dark pixel-count thresholds
 * derived from the requested target brightness val (0..255, i.e. a
 * fraction val/256 of the counted pixels should be "bright"). */
static int pb0100_set_autogain_target(struct gspca_dev *gspca_dev, __s32 val)
{
	int err, totalpixels, brightpixels, darkpixels;
	struct sd *sd = (struct sd *) gspca_dev;

	/* Number of pixels counted by the sensor when subsampling the pixels.
	 * Slightly larger than the real value to avoid oscillation */
	totalpixels = gspca_dev->pixfmt.width * gspca_dev->pixfmt.height;
	totalpixels = totalpixels/(8*8) + totalpixels/(64*64);

	brightpixels = (totalpixels * val) >> 8;
	darkpixels   = totalpixels - brightpixels;
	err = stv06xx_write_sensor(sd, PB_R21, brightpixels);
	if (!err)
		err = stv06xx_write_sensor(sd, PB_R22, darkpixels);

	gspca_dbg(gspca_dev, D_CONF, "Set autogain target to %d, status: %d\n",
		  val, err);
	return err;
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_3959_1 |
crossvul-cpp_data_bad_3140_0 | /***************************************************************************\
* *
* BitlBee - An IRC to IM gateway *
* libpurple module - File transfer stuff *
* *
* Copyright 2009-2010 Wilmer van der Gaast <wilmer@gaast.net> *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. *
* *
\***************************************************************************/
/* Do file transfers via disk for now, since libpurple was really designed
for straight-to/from disk fts and is only just learning how to pass the
file contents the the UI instead (2.6.0 and higher it seems, and with
varying levels of success). */
#include "bitlbee.h"
#include "bpurple.h"
#include <stdarg.h>
#include <glib.h>
#include <purple.h>
/* UI-side state for one libpurple file transfer.  Transfers are staged
 * through a temporary file on disk (see the comment at the top of this
 * file). */
struct prpl_xfer_data {
	PurpleXfer *xfer;	/* libpurple handle; NULL after it goes away */
	file_transfer_t *ft;	/* bitlbee handle */
	struct im_connection *ic;
	int fd;			/* staging file descriptor, -1 when not open */
	char *fn, *handle;	/* staging file name / remote contact handle */
	gboolean ui_wants_data;	/* bitlbee has asked for the next chunk */
	int timeout;		/* pending b_timeout_add() id, 0 when none */
};
static file_transfer_t *next_ft;
struct im_connection *purple_ic_by_pa(PurpleAccount *pa);
static gboolean prplcb_xfer_new_send_cb(gpointer data, gint fd, b_input_condition cond);
static gboolean prpl_xfer_write_request(struct file_transfer *ft);
/* Receiving files (IM->UI): */
/* bitlbee ft->accept hook: tell libpurple the transfer may proceed, then
 * immediately ask for the first chunk of data. */
static void prpl_xfer_accept(struct file_transfer *ft)
{
	struct prpl_xfer_data *xd = ft->data;

	purple_xfer_request_accepted(xd->xfer, NULL);
	prpl_xfer_write_request(ft);
}
/* bitlbee ft->canceled hook: cancel the libpurple side (if it is still
 * running) and drop our reference to the PurpleXfer. */
static void prpl_xfer_canceled(struct file_transfer *ft, char *reason)
{
	struct prpl_xfer_data *xd = ft->data;

	if (xd->xfer == NULL)
		return;

	if (!purple_xfer_is_completed(xd->xfer) &&
	    !purple_xfer_is_canceled(xd->xfer))
		purple_xfer_cancel_local(xd->xfer);

	xd->xfer->ui_data = NULL;
	purple_xfer_unref(xd->xfer);
	xd->xfer = NULL;
}
/* bitlbee ft->free hook: tear down all UI-side state for a transfer
 * (list entry, purple reference, pending timeout, staging file, px). */
static void prpl_xfer_free(struct file_transfer *ft)
{
	struct prpl_xfer_data *px = ft->data;
	struct purple_data *pd = px->ic->proto_data;

	pd->filetransfers = g_slist_remove(pd->filetransfers, px);

	if (px->xfer) {
		/* break the back-reference before dropping our ref */
		px->xfer->ui_data = NULL;
		purple_xfer_unref(px->xfer);
	}
	if (px->timeout) {
		b_event_remove(px->timeout);
	}
	g_free(px->fn);
	g_free(px->handle);
	if (px->fd >= 0) {
		close(px->fd);
	}
	g_free(px);
}
/* libpurple ui-op: a new PurpleXfer was created.  Incoming (RECEIVE)
 * transfers get fresh UI state and a staging file; outgoing transfers are
 * paired with the bitlbee ft stashed in next_ft by purple_transfer_forward(). */
static void prplcb_xfer_new(PurpleXfer *xfer)
{
	purple_xfer_ref(xfer);

	if (purple_xfer_get_type(xfer) == PURPLE_XFER_RECEIVE) {
		struct prpl_xfer_data *px = g_new0(struct prpl_xfer_data, 1);
		struct purple_data *pd;

		xfer->ui_data = px;
		px->xfer = xfer;
		/* NOTE(review): mktemp() only generates a name, so there is a
		 * TOCTOU window before libpurple creates the file;
		 * mkstemp()/mkostemp() would be the safer choice here. */
		px->fn = mktemp(g_strdup("/tmp/bitlbee-purple-ft.XXXXXX"));
		px->fd = -1;
		px->ic = purple_ic_by_pa(xfer->account);

		pd = px->ic->proto_data;
		pd->filetransfers = g_slist_prepend(pd->filetransfers, px);

		purple_xfer_set_local_filename(xfer, px->fn);

		/* Sadly the xfer struct is still empty ATM so come back after
		   the caller is done. */
		b_timeout_add(0, prplcb_xfer_new_send_cb, xfer);
	} else {
		/* outgoing: single-threaded hand-off via the next_ft global */
		struct file_transfer *ft = next_ft;
		struct prpl_xfer_data *px = ft->data;

		xfer->ui_data = px;
		px->xfer = xfer;
		next_ft = NULL;
	}
}
/* Deferred continuation of prplcb_xfer_new() for incoming transfers: by the
 * time this zero-length timeout fires, libpurple has filled in the
 * PurpleXfer, so the bitlbee-side file transfer can be created for it.
 * Always returns FALSE so the one-shot timeout is not rescheduled. */
static gboolean prplcb_xfer_new_send_cb(gpointer data, gint fd, b_input_condition cond)
{
	PurpleXfer *xfer = data;
	struct im_connection *ic = purple_ic_by_pa(xfer->account);
	struct prpl_xfer_data *px = xfer->ui_data;
	PurpleBuddy *buddy;
	const char *who;

	buddy = purple_find_buddy(xfer->account, xfer->who);
	who = buddy ? purple_buddy_get_name(buddy) : xfer->who;

	/* TODO(wilmer): After spreading some more const goodness in BitlBee,
	   remove the evil cast below. */
	px->ft = imcb_file_send_start(ic, (char *) who, xfer->filename, xfer->size);
	if (!px->ft) {
		/* BUG FIX (CWE-476): imcb_file_send_start() can return NULL
		 * (e.g. the sender is not known to the UI).  The assignments
		 * below would then dereference a NULL pointer, crashing on an
		 * attacker-triggerable code path.  Cancel the transfer instead. */
		purple_xfer_cancel_local(xfer);
		return FALSE;
	}
	px->ft->data = px;

	px->ft->accept = prpl_xfer_accept;
	px->ft->canceled = prpl_xfer_canceled;
	px->ft->free = prpl_xfer_free;
	px->ft->write_request = prpl_xfer_write_request;

	return FALSE;
}
/* Pump data from the staging file into bitlbee.  Called directly and as a
 * b_timeout_add() callback; always returns FALSE so a timeout that invoked
 * it is not repeated.  Sends at most one 1 KiB chunk per call — bitlbee
 * re-arms delivery through ft->write_request(). */
gboolean try_write_to_ui(gpointer data, gint fd, b_input_condition cond)
{
	struct file_transfer *ft = data;
	struct prpl_xfer_data *px = ft->data;
	struct stat fs;
	off_t tx_bytes;

	/* If we don't have the file opened yet, there's no data so wait. */
	if (px->fd < 0 || !px->ui_wants_data) {
		return FALSE;
	}

	tx_bytes = lseek(px->fd, 0, SEEK_CUR);	/* bytes delivered so far */
	fstat(px->fd, &fs);

	if (fs.st_size > tx_bytes) {
		char buf[1024];
		size_t n = MIN(fs.st_size - tx_bytes, sizeof(buf));

		if (read(px->fd, buf, n) == n && ft->write(ft, buf, n)) {
			px->ui_wants_data = FALSE;
		} else {
			purple_xfer_cancel_local(px->xfer);
			imcb_file_canceled(px->ic, ft, "Read error");
		}
	}

	/* everything libpurple announced has been delivered: finish up */
	if (lseek(px->fd, 0, SEEK_CUR) == px->xfer->size) {
		/*purple_xfer_end( px->xfer );*/
		imcb_file_finished(px->ic, ft);
	}

	return FALSE;
}
/* UI calls this when its buffer is empty and wants more data to send to the user. */
/* UI calls this when its buffer is empty and wants more data to send to
 * the user: mark the request and try to satisfy it right away. */
static gboolean prpl_xfer_write_request(struct file_transfer *ft)
{
	struct prpl_xfer_data *xd = ft->data;

	xd->ui_wants_data = TRUE;
	try_write_to_ui(ft, 0, 0);

	return FALSE;
}
/* libpurple ui-op: the PurpleXfer is being destroyed — drop our
 * back-pointer so no-one dereferences it later. */
static void prplcb_xfer_destroy(PurpleXfer *xfer)
{
	struct prpl_xfer_data *xd = xfer->ui_data;

	if (xd == NULL)
		return;

	xd->xfer = NULL;
}
/* libpurple ui-op: transfer progress update (percent in [0,1]).
 * For sends: the staging copy is no longer needed once libpurple has it,
 * so delete the file and its temp directory on the first call.
 * For receives: open the staging file as soon as data appears and forward
 * it to bitlbee. */
static void prplcb_xfer_progress(PurpleXfer *xfer, double percent)
{
	struct prpl_xfer_data *px = xfer->ui_data;

	if (px == NULL) {
		return;
	}

	if (purple_xfer_get_type(xfer) == PURPLE_XFER_SEND) {
		if (*px->fn) {
			char *slash;

			unlink(px->fn);
			if ((slash = strrchr(px->fn, '/'))) {
				*slash = '\0';
				rmdir(px->fn);
			}
			*px->fn = '\0';	/* empty string marks cleanup done */
		}
		return;
	}

	if (px->fd == -1 && percent > 0) {
		/* Weeeeeeeee, we're getting data! That means the file exists
		   by now so open it and start sending to the UI. */
		px->fd = open(px->fn, O_RDONLY);

		/* Unlink it now, because we don't need it after this. */
		unlink(px->fn);
	}

	if (percent < 1) {
		try_write_to_ui(px->ft, 0, 0);
	} else {
		/* Another nice problem: If we have the whole file, it only
		   gets closed when we return. Problem: There may still be
		   stuff buffered and not written, we'll only see it after
		   the caller close()s the file. So poll the file after that. */
		b_timeout_add(0, try_write_to_ui, px->ft);
	}
}
/* libpurple ui-op: the remote side cancelled the transfer.  Propagate it
 * to bitlbee, or just log an error if the bitlbee ft doesn't exist yet. */
static void prplcb_xfer_cancel_remote(PurpleXfer *xfer)
{
	struct prpl_xfer_data *xd = xfer->ui_data;

	if (xd == NULL)
		return;

	if (xd->ft != NULL) {
		imcb_file_canceled(xd->ic, xd->ft, "Canceled by remote end");
	} else {
		/* xd->ft == NULL for sends, because of the two stages. :-/ */
		imcb_error(xd->ic, "File transfer cancelled by remote end");
	}
}
/* Sending files (UI->IM): */
static gboolean prpl_xfer_write(struct file_transfer *ft, char *buffer, unsigned int len);
static gboolean purple_transfer_request_cb(gpointer data, gint fd, b_input_condition cond);
/* Start an outgoing (IRC user -> IM) transfer: create a per-transfer temp
 * directory and staging file; the actual libpurple transfer begins once the
 * whole file has been cached (see purple_transfer_forward()). */
void purple_transfer_request(struct im_connection *ic, file_transfer_t *ft, char *handle)
{
	struct prpl_xfer_data *px = g_new0(struct prpl_xfer_data, 1);
	struct purple_data *pd;
	char *dir, *basename;

	ft->data = px;
	px->ft = ft;
	px->ft->free = prpl_xfer_free;

	dir = g_strdup("/tmp/bitlbee-purple-ft.XXXXXX");
	if (!mkdtemp(dir)) {
		/* NOTE(review): ft->data/ft->free still reference px after
		 * this early return — verify the caller never invokes them
		 * once imcb_error() has been reported. */
		imcb_error(ic, "Could not create temporary file for file transfer");
		g_free(px);
		g_free(dir);
		return;
	}

	if ((basename = strrchr(ft->file_name, '/'))) {
		basename++;
	} else {
		basename = ft->file_name;
	}
	px->fn = g_strdup_printf("%s/%s", dir, basename);
	px->fd = open(px->fn, O_WRONLY | O_CREAT, 0600);
	g_free(dir);

	if (px->fd < 0) {
		imcb_error(ic, "Could not create temporary file for file transfer");
		/* BUG FIX: px->fn was previously freed *after* px itself,
		 * reading px->fn from already-freed memory (use-after-free).
		 * Free the member first, then the container. */
		g_free(px->fn);
		g_free(px);
		return;
	}

	px->ic = ic;
	px->handle = g_strdup(handle);

	pd = px->ic->proto_data;
	pd->filetransfers = g_slist_prepend(pd->filetransfers, px);

	imcb_log(ic,
	         "Due to libpurple limitations, the file has to be cached locally before proceeding with the actual file transfer. Please wait...");

	px->timeout = b_timeout_add(0, purple_transfer_request_cb, ft);
}
/* The local staging copy is complete: hand the file to libpurple for the
 * actual IM-side transfer. */
static void purple_transfer_forward(struct file_transfer *ft)
{
	struct prpl_xfer_data *px = ft->data;
	struct purple_data *pd = px->ic->proto_data;

	/* xfer_new() will pick up this variable. It's a hack but we're not
	   multi-threaded anyway. */
	next_ft = ft;
	serv_send_file(purple_account_get_connection(pd->account),
	               px->handle, px->fn);
}
/* Timeout callback that drives the caching of an outgoing file: on the
 * first run it installs the write handler and announces the transfer to
 * bitlbee, then (every run) it requests the next chunk. */
static gboolean purple_transfer_request_cb(gpointer data, gint fd, b_input_condition cond)
{
	file_transfer_t *ft = data;
	struct prpl_xfer_data *xd = ft->data;

	xd->timeout = 0;

	if (ft->write == NULL) {
		ft->write = prpl_xfer_write;
		imcb_file_recv_start(xd->ic, ft);
	}

	ft->write_request(ft);

	return FALSE;
}
/* bitlbee ft->write hook: append one chunk to the staging file.  Once the
 * whole file is cached, close it and start the real libpurple transfer. */
static gboolean prpl_xfer_write(struct file_transfer *ft, char *buffer, unsigned int len)
{
	struct prpl_xfer_data *px = ft->data;

	if (write(px->fd, buffer, len) != len) {
		imcb_file_canceled(px->ic, ft, "Error while writing temporary file");
		return FALSE;
	}

	if (lseek(px->fd, 0, SEEK_CUR) >= ft->file_size) {
		/* everything cached: hand the file over to libpurple */
		close(px->fd);
		px->fd = -1;

		purple_transfer_forward(ft);
		imcb_file_finished(px->ic, ft);
		px->ft = NULL;
	} else {
		/* ask bitlbee for the next chunk on the next main-loop turn */
		px->timeout = b_timeout_add(0, purple_transfer_request_cb, ft);
	}

	return TRUE;
}
/* Called at logout: cancel every transfer still registered on this
 * connection.  The px entries themselves are presumably released through
 * the ft->free hook triggered by imcb_file_canceled() — verify; only the
 * list links are removed here. */
void purple_transfer_cancel_all(struct im_connection *ic)
{
	struct purple_data *pd = ic->proto_data;

	while (pd->filetransfers) {
		struct prpl_xfer_data *px = pd->filetransfers->data;

		if (px->ft) {
			imcb_file_canceled(ic, px->ft, "Logging out");
		}
		pd->filetransfers = g_slist_remove(pd->filetransfers, px);
	}
}
/* File-transfer UI callbacks registered with libpurple; NULL entries are
 * callbacks this UI does not implement. */
PurpleXferUiOps bee_xfer_uiops =
{
	prplcb_xfer_new,           /* new_xfer */
	prplcb_xfer_destroy,       /* destroy */
	NULL,                      /* add_xfer */
	prplcb_xfer_progress,      /* update_progress */
	NULL,                      /* cancel_local */
	prplcb_xfer_cancel_remote, /* cancel_remote */
	NULL,                      /* ui_write */
	NULL,                      /* ui_read */
	NULL,                      /* data_not_sent */
};
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3140_0 |
crossvul-cpp_data_good_4849_3 | /*
* Copyright (c) 1999-2000 Image Power, Inc. and the University of
* British Columbia.
* Copyright (c) 2001-2004 Michael David Adams.
* All rights reserved.
*/
/* __START_OF_JASPER_LICENSE__
*
* JasPer License Version 2.0
*
* Copyright (c) 2001-2006 Michael David Adams
* Copyright (c) 1999-2000 Image Power, Inc.
* Copyright (c) 1999-2000 The University of British Columbia
*
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person (the
* "User") obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without restriction,
* including without limitation the rights to use, copy, modify, merge,
* publish, distribute, and/or sell copies of the Software, and to permit
* persons to whom the Software is furnished to do so, subject to the
* following conditions:
*
* 1. The above copyright notices and this permission notice (which
* includes the disclaimer below) shall be included in all copies or
* substantial portions of the Software.
*
* 2. The name of a copyright holder shall not be used to endorse or
* promote products derived from the Software without specific prior
* written permission.
*
* THIS DISCLAIMER OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS
* LICENSE. NO USE OF THE SOFTWARE IS AUTHORIZED HEREUNDER EXCEPT UNDER
* THIS DISCLAIMER. THE SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS
* "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
* BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL
* INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING
* FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
* NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
* WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. NO ASSURANCES ARE
* PROVIDED BY THE COPYRIGHT HOLDERS THAT THE SOFTWARE DOES NOT INFRINGE
* THE PATENT OR OTHER INTELLECTUAL PROPERTY RIGHTS OF ANY OTHER ENTITY.
* EACH COPYRIGHT HOLDER DISCLAIMS ANY LIABILITY TO THE USER FOR CLAIMS
* BROUGHT BY ANY OTHER ENTITY BASED ON INFRINGEMENT OF INTELLECTUAL
* PROPERTY RIGHTS OR OTHERWISE. AS A CONDITION TO EXERCISING THE RIGHTS
* GRANTED HEREUNDER, EACH USER HEREBY ASSUMES SOLE RESPONSIBILITY TO SECURE
* ANY OTHER INTELLECTUAL PROPERTY RIGHTS NEEDED, IF ANY. THE SOFTWARE
* IS NOT FAULT-TOLERANT AND IS NOT INTENDED FOR USE IN MISSION-CRITICAL
* SYSTEMS, SUCH AS THOSE USED IN THE OPERATION OF NUCLEAR FACILITIES,
* AIRCRAFT NAVIGATION OR COMMUNICATION SYSTEMS, AIR TRAFFIC CONTROL
* SYSTEMS, DIRECT LIFE SUPPORT MACHINES, OR WEAPONS SYSTEMS, IN WHICH
* THE FAILURE OF THE SOFTWARE OR SYSTEM COULD LEAD DIRECTLY TO DEATH,
* PERSONAL INJURY, OR SEVERE PHYSICAL OR ENVIRONMENTAL DAMAGE ("HIGH
* RISK ACTIVITIES"). THE COPYRIGHT HOLDERS SPECIFICALLY DISCLAIM ANY
* EXPRESS OR IMPLIED WARRANTY OF FITNESS FOR HIGH RISK ACTIVITIES.
*
* __END_OF_JASPER_LICENSE__
*/
/*
* Tree-Structured Filter Bank (TSFB) Library
*
* $Id$
*/
/******************************************************************************\
* Includes.
\******************************************************************************/
#include <assert.h>
#include "jasper/jas_malloc.h"
#include "jasper/jas_seq.h"
#include "jpc_tsfb.h"
#include "jpc_cod.h"
#include "jpc_cs.h"
#include "jpc_util.h"
#include "jpc_math.h"
void jpc_tsfb_getbands2(jpc_tsfb_t *tsfb, int locxstart, int locystart,
int xstart, int ystart, int xend, int yend, jpc_tsfb_band_t **bands,
int numlvls);
/******************************************************************************\
*
\******************************************************************************/
/* Allocate a tree-structured filter bank descriptor for the given QMFB id
 * and number of decomposition levels.  With zero levels no filter bank is
 * attached.  Returns the new object, or 0 on allocation failure. */
jpc_tsfb_t *jpc_cod_gettsfb(int qmfbid, int numlvls)
{
	jpc_tsfb_t *tsfb;

	if (!(tsfb = malloc(sizeof(*tsfb))))
		return 0;

	tsfb->qmfb = 0;
	if (numlvls > 0) {
		/* JPC_COX_INS selects the irreversible (9/7) bank; anything
		 * else — including JPC_COX_RFT — the reversible (5/3) one,
		 * matching the original switch's default case. */
		if (qmfbid == JPC_COX_INS)
			tsfb->qmfb = &jpc_ns_qmfb2d;
		else
			tsfb->qmfb = &jpc_ft_qmfb2d;
	}
	tsfb->numlvls = numlvls;
	return tsfb;
}
/* Release a TSFB object created by jpc_cod_gettsfb().  Safe with a null
 * pointer (free(NULL) is a no-op). */
void jpc_tsfb_destroy(jpc_tsfb_t *tsfb)
{
	free(tsfb);
}
/* Forward wavelet transform, in place, over the whole sequence.  A tree
 * with zero levels is the identity.  Returns 0 on success, -1 on failure.
 * NOTE(review): unlike jpc_tsfb_synthesize() there is no jas_seq2d_size()
 * guard here; presumably an empty sequence never reaches this path —
 * confirm against callers. */
int jpc_tsfb_analyze(jpc_tsfb_t *tsfb, jas_seq2d_t *a)
{
	return (tsfb->numlvls > 0) ? jpc_tsfb_analyze2(tsfb, jas_seq2d_getref(a,
	  jas_seq2d_xstart(a), jas_seq2d_ystart(a)), jas_seq2d_xstart(a),
	  jas_seq2d_ystart(a), jas_seq2d_width(a),
	  jas_seq2d_height(a), jas_seq2d_rowstep(a), tsfb->numlvls - 1) : 0;
}
/* Recursive worker for jpc_tsfb_analyze(): apply the 2-D analysis filter
 * at this level, then recurse into the low-pass (top-left) subband, which
 * occupies ceil(dim/2) samples in each direction. */
int jpc_tsfb_analyze2(jpc_tsfb_t *tsfb, int *a, int xstart, int ystart,
  int width, int height, int stride, int numlvls)
{
	if (width > 0 && height > 0) {
		if ((*tsfb->qmfb->analyze)(a, xstart, ystart, width, height, stride))
			return -1;
		if (numlvls > 0) {
			if (jpc_tsfb_analyze2(tsfb, a, JPC_CEILDIVPOW2(xstart,
			  1), JPC_CEILDIVPOW2(ystart, 1), JPC_CEILDIVPOW2(
			  xstart + width, 1) - JPC_CEILDIVPOW2(xstart, 1),
			  JPC_CEILDIVPOW2(ystart + height, 1) -
			  JPC_CEILDIVPOW2(ystart, 1), stride, numlvls - 1)) {
				return -1;
			}
		}
	}
	return 0;
}
/* Inverse wavelet transform, in place.  Returns 0 on success, -1 on
 * failure.  The jas_seq2d_size() check makes degenerate (empty) sequences
 * a no-op instead of handing them to the filter bank. */
int jpc_tsfb_synthesize(jpc_tsfb_t *tsfb, jas_seq2d_t *a)
{
	return (tsfb->numlvls > 0 && jas_seq2d_size(a)) ?
	  jpc_tsfb_synthesize2(tsfb,
	  jas_seq2d_getref(a, jas_seq2d_xstart(a), jas_seq2d_ystart(a)),
	  jas_seq2d_xstart(a), jas_seq2d_ystart(a), jas_seq2d_width(a),
	  jas_seq2d_height(a), jas_seq2d_rowstep(a), tsfb->numlvls - 1) : 0;
}
/* Recursive worker for jpc_tsfb_synthesize(): reconstruct the low-pass
 * (top-left) subband first, then apply the 2-D synthesis filter at this
 * level — the reverse of the analysis order. */
int jpc_tsfb_synthesize2(jpc_tsfb_t *tsfb, int *a, int xstart, int ystart,
  int width, int height, int stride, int numlvls)
{
	if (numlvls > 0) {
		if (jpc_tsfb_synthesize2(tsfb, a, JPC_CEILDIVPOW2(xstart, 1),
		  JPC_CEILDIVPOW2(ystart, 1), JPC_CEILDIVPOW2(xstart + width,
		  1) - JPC_CEILDIVPOW2(xstart, 1), JPC_CEILDIVPOW2(ystart +
		  height, 1) - JPC_CEILDIVPOW2(ystart, 1), stride, numlvls -
		  1)) {
			return -1;
		}
	}
	if (width > 0 && height > 0) {
		if ((*tsfb->qmfb->synthesize)(a, xstart, ystart, width, height, stride)) {
			return -1;
		}
	}
	return 0;
}
/* Fill in the band descriptors for the given region.  Returns the number
 * of descriptors written into bands.  With no decomposition levels the
 * result is a single LL band covering the whole region. */
int jpc_tsfb_getbands(jpc_tsfb_t *tsfb, uint_fast32_t xstart,
  uint_fast32_t ystart, uint_fast32_t xend, uint_fast32_t yend,
  jpc_tsfb_band_t *bands)
{
	jpc_tsfb_band_t *band;

	band = bands;
	if (tsfb->numlvls > 0) {
		jpc_tsfb_getbands2(tsfb, xstart, ystart, xstart, ystart, xend, yend,
		  &band, tsfb->numlvls);
	} else {
		band->xstart = xstart;
		band->ystart = ystart;
		band->xend = xend;
		band->yend = yend;
		band->locxstart = xstart;
		band->locystart = ystart;
		band->locxend = band->locxstart + band->xend - band->xstart;
		band->locyend = band->locystart + band->yend - band->ystart;
		band->orient = JPC_TSFB_LL;
		band->synenergywt = JPC_FIX_ONE;
		++band;
	}
	return band - bands;
}
/* Recursive worker for jpc_tsfb_getbands(): descend to the deepest level
 * first, then emit the HL, LH and HH bands for this level.  At the bottom
 * of the recursion a single LL band is emitted.  *bands is advanced past
 * every descriptor written. */
void jpc_tsfb_getbands2(jpc_tsfb_t *tsfb, int locxstart, int locystart,
  int xstart, int ystart, int xend, int yend, jpc_tsfb_band_t **bands,
  int numlvls)
{
	int newxstart;
	int newystart;
	int newxend;
	int newyend;
	jpc_tsfb_band_t *band;

	/* bounds of the low-pass subband at the next level */
	newxstart = JPC_CEILDIVPOW2(xstart, 1);
	newystart = JPC_CEILDIVPOW2(ystart, 1);
	newxend = JPC_CEILDIVPOW2(xend, 1);
	newyend = JPC_CEILDIVPOW2(yend, 1);

	if (numlvls > 0) {
		/* recurse into the LL subband first */
		jpc_tsfb_getbands2(tsfb, locxstart, locystart, newxstart, newystart,
		  newxend, newyend, bands, numlvls - 1);

		/* HL (horizontal high-pass, vertical low-pass) band */
		band = *bands;
		band->xstart = JPC_FLOORDIVPOW2(xstart, 1);
		band->ystart = newystart;
		band->xend = JPC_FLOORDIVPOW2(xend, 1);
		band->yend = newyend;
		band->locxstart = locxstart + newxend - newxstart;
		band->locystart = locystart;
		band->locxend = band->locxstart + band->xend - band->xstart;
		band->locyend = band->locystart + band->yend - band->ystart;
		band->orient = JPC_TSFB_HL;
		band->synenergywt = jpc_dbltofix(tsfb->qmfb->hpenergywts[
		  tsfb->numlvls - numlvls] * tsfb->qmfb->lpenergywts[
		  tsfb->numlvls - numlvls]);
		++(*bands);

		/* LH (horizontal low-pass, vertical high-pass) band */
		band = *bands;
		band->xstart = newxstart;
		band->ystart = JPC_FLOORDIVPOW2(ystart, 1);
		band->xend = newxend;
		band->yend = JPC_FLOORDIVPOW2(yend, 1);
		band->locxstart = locxstart;
		band->locystart = locystart + newyend - newystart;
		band->locxend = band->locxstart + band->xend - band->xstart;
		band->locyend = band->locystart + band->yend - band->ystart;
		band->orient = JPC_TSFB_LH;
		band->synenergywt = jpc_dbltofix(tsfb->qmfb->lpenergywts[
		  tsfb->numlvls - numlvls] * tsfb->qmfb->hpenergywts[
		  tsfb->numlvls - numlvls]);
		++(*bands);

		/* HH (high-pass in both directions) band */
		band = *bands;
		band->xstart = JPC_FLOORDIVPOW2(xstart, 1);
		band->ystart = JPC_FLOORDIVPOW2(ystart, 1);
		band->xend = JPC_FLOORDIVPOW2(xend, 1);
		band->yend = JPC_FLOORDIVPOW2(yend, 1);
		band->locxstart = locxstart + newxend - newxstart;
		band->locystart = locystart + newyend - newystart;
		band->locxend = band->locxstart + band->xend - band->xstart;
		band->locyend = band->locystart + band->yend - band->ystart;
		band->orient = JPC_TSFB_HH;
		band->synenergywt = jpc_dbltofix(tsfb->qmfb->hpenergywts[
		  tsfb->numlvls - numlvls] * tsfb->qmfb->hpenergywts[
		  tsfb->numlvls - numlvls]);
		++(*bands);
	} else {
		/* bottom of the tree: the final LL band */
		band = *bands;
		band->xstart = xstart;
		band->ystart = ystart;
		band->xend = xend;
		band->yend = yend;
		band->locxstart = locxstart;
		band->locystart = locystart;
		band->locxend = band->locxstart + band->xend - band->xstart;
		band->locyend = band->locystart + band->yend - band->ystart;
		band->orient = JPC_TSFB_LL;
		band->synenergywt = jpc_dbltofix(tsfb->qmfb->lpenergywts[
		  tsfb->numlvls - numlvls - 1] * tsfb->qmfb->lpenergywts[
		  tsfb->numlvls - numlvls - 1]);
		++(*bands);
	}
}
| ./CrossVul/dataset_final_sorted/CWE-476/c/good_4849_3 |
crossvul-cpp_data_bad_2835_0 | /*
* LEGO USB Tower driver
*
* Copyright (C) 2003 David Glance <davidgsf@sourceforge.net>
* 2001-2004 Juergen Stuber <starblue@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* derived from USB Skeleton driver - 0.5
* Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
*
* History:
*
* 2001-10-13 - 0.1 js
* - first version
* 2001-11-03 - 0.2 js
* - simplified buffering, one-shot URBs for writing
* 2001-11-10 - 0.3 js
* - removed IOCTL (setting power/mode is more complicated, postponed)
* 2001-11-28 - 0.4 js
* - added vendor commands for mode of operation and power level in open
* 2001-12-04 - 0.5 js
* - set IR mode by default (by oversight 0.4 set VLL mode)
* 2002-01-11 - 0.5? pcchan
* - make read buffer reusable and work around bytes_to_write issue between
* uhci and legusbtower
* 2002-09-23 - 0.52 david (david@csse.uwa.edu.au)
* - imported into lejos project
* - changed wake_up to wake_up_interruptible
* - changed to use lego0 rather than tower0
* - changed dbg() to use __func__ rather than deprecated __func__
* 2003-01-12 - 0.53 david (david@csse.uwa.edu.au)
* - changed read and write to write everything or
* timeout (from a patch by Chris Riesen and Brett Thaeler driver)
* - added ioctl functionality to set timeouts
* 2003-07-18 - 0.54 davidgsf (david@csse.uwa.edu.au)
* - initial import into LegoUSB project
* - merge of existing LegoUSB.c driver
* 2003-07-18 - 0.56 davidgsf (david@csse.uwa.edu.au)
* - port to 2.6 style driver
* 2004-02-29 - 0.6 Juergen Stuber <starblue@users.sourceforge.net>
* - fix locking
* - unlink read URBs which are no longer needed
* - allow increased buffer size, eliminates need for timeout on write
* - have read URB running continuously
* - added poll
* - forbid seeking
* - added nonblocking I/O
* - changed back __func__ to __func__
* - read and log tower firmware version
* - reset tower on probe, avoids failure of first write
* 2004-03-09 - 0.7 Juergen Stuber <starblue@users.sourceforge.net>
* - timeout read now only after inactivity, shorten default accordingly
* 2004-03-11 - 0.8 Juergen Stuber <starblue@users.sourceforge.net>
* - log major, minor instead of possibly confusing device filename
* - whitespace cleanup
* 2004-03-12 - 0.9 Juergen Stuber <starblue@users.sourceforge.net>
* - normalize whitespace in debug messages
* - take care about endianness in control message responses
* 2004-03-13 - 0.91 Juergen Stuber <starblue@users.sourceforge.net>
* - make default intervals longer to accommodate current EHCI driver
* 2004-03-19 - 0.92 Juergen Stuber <starblue@users.sourceforge.net>
* - replaced atomic_t by memory barriers
* 2004-04-21 - 0.93 Juergen Stuber <starblue@users.sourceforge.net>
* - wait for completion of write urb in release (needed for remotecontrol)
* - corrected poll for write direction (missing negation)
* 2004-04-22 - 0.94 Juergen Stuber <starblue@users.sourceforge.net>
* - make device locking interruptible
* 2004-04-30 - 0.95 Juergen Stuber <starblue@users.sourceforge.net>
* - check for valid udev on resubmitting and unlinking urbs
* 2004-08-03 - 0.96 Juergen Stuber <starblue@users.sourceforge.net>
* - move reset into open to clean out spurious data
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <linux/usb.h>
#include <linux/poll.h>
/* Version Information */
#define DRIVER_VERSION "v0.96"
#define DRIVER_AUTHOR "Juergen Stuber <starblue@sourceforge.net>"
#define DRIVER_DESC "LEGO USB Tower Driver"
/* The defaults are chosen to work with the latest versions of leJOS and NQC.
*/
/* Some legacy software likes to receive packets in one piece.
* In this case read_buffer_size should exceed the maximal packet length
* (417 for datalog uploads), and packet_timeout should be set.
*/
static int read_buffer_size = 480;
module_param(read_buffer_size, int, 0);
MODULE_PARM_DESC(read_buffer_size, "Read buffer size");
/* Some legacy software likes to send packets in one piece.
* In this case write_buffer_size should exceed the maximal packet length
* (417 for firmware and program downloads).
* A problem with long writes is that the following read may time out
* if the software is not prepared to wait long enough.
*/
static int write_buffer_size = 480;
module_param(write_buffer_size, int, 0);
MODULE_PARM_DESC(write_buffer_size, "Write buffer size");
/* Some legacy software expects reads to contain whole LASM packets.
* To achieve this, characters which arrive before a packet timeout
* occurs will be returned in a single read operation.
* A problem with long reads is that the software may time out
* if it is not prepared to wait long enough.
* The packet timeout should be greater than the time between the
* reception of subsequent characters, which should arrive about
* every 5ms for the standard 2400 baud.
* Set it to 0 to disable.
*/
static int packet_timeout = 50;
module_param(packet_timeout, int, 0);
MODULE_PARM_DESC(packet_timeout, "Packet timeout in ms");
/* Some legacy software expects blocking reads to time out.
* Timeout occurs after the specified time of read and write inactivity.
* Set it to 0 to disable.
*/
static int read_timeout = 200;
module_param(read_timeout, int, 0);
MODULE_PARM_DESC(read_timeout, "Read timeout in ms");
/* As of kernel version 2.6.4 ehci-hcd uses an
* "only one interrupt transfer per frame" shortcut
* to simplify the scheduling of periodic transfers.
* This conflicts with our standard 1ms intervals for in and out URBs.
* We use default intervals of 2ms for in and 8ms for out transfers,
* which is fast enough for 2400 baud and allows a small additional load.
* Increase the interval to allow more devices that do interrupt transfers,
* or set to 0 to use the standard interval from the endpoint descriptors.
*/
static int interrupt_in_interval = 2;
module_param(interrupt_in_interval, int, 0);
MODULE_PARM_DESC(interrupt_in_interval, "Interrupt in interval in ms");
static int interrupt_out_interval = 8;
module_param(interrupt_out_interval, int, 0);
MODULE_PARM_DESC(interrupt_out_interval, "Interrupt out interval in ms");
/* Define these values to match your device */
#define LEGO_USB_TOWER_VENDOR_ID 0x0694
#define LEGO_USB_TOWER_PRODUCT_ID 0x0001
/* Vendor requests */
#define LEGO_USB_TOWER_REQUEST_RESET 0x04
#define LEGO_USB_TOWER_REQUEST_GET_VERSION 0xFD
struct tower_reset_reply {
__le16 size; /* little-endian */
__u8 err_code;
__u8 spare;
} __attribute__ ((packed));
struct tower_get_version_reply {
__le16 size; /* little-endian */
__u8 err_code;
__u8 spare;
__u8 major;
__u8 minor;
__le16 build_no; /* little-endian */
} __attribute__ ((packed));
/* table of devices that work with this driver */
static const struct usb_device_id tower_table[] = {
{ USB_DEVICE(LEGO_USB_TOWER_VENDOR_ID, LEGO_USB_TOWER_PRODUCT_ID) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE (usb, tower_table);
static DEFINE_MUTEX(open_disc_mutex);
#define LEGO_USB_TOWER_MINOR_BASE 160
/* Structure to hold all of our device specific stuff */
struct lego_usb_tower {
struct mutex lock; /* locks this structure */
struct usb_device* udev; /* save off the usb device pointer */
unsigned char minor; /* the starting minor number for this device */
int open_count; /* number of times this port has been opened */
char* read_buffer; /* accumulates bytes from the interrupt-in URB */
size_t read_buffer_length; /* this much came in */
size_t read_packet_length; /* this much will be returned on read */
spinlock_t read_buffer_lock; /* guards read_buffer* and read_last_arrival */
int packet_timeout_jiffies; /* packet_timeout module param, in jiffies */
unsigned long read_last_arrival; /* jiffies timestamp of last received byte */
wait_queue_head_t read_wait; /* readers sleep here until data/timeout */
wait_queue_head_t write_wait; /* writers sleep here while a write is busy */
char* interrupt_in_buffer; /* URB transfer buffer, one max-packet long */
struct usb_endpoint_descriptor* interrupt_in_endpoint;
struct urb* interrupt_in_urb; /* continuously resubmitted while open */
int interrupt_in_interval; /* polling interval actually used (ms) */
int interrupt_in_running; /* set while the in URB should resubmit itself */
int interrupt_in_done; /* completion flag checked by sleeping readers */
char* interrupt_out_buffer; /* URB transfer buffer, write_buffer_size long */
struct usb_endpoint_descriptor* interrupt_out_endpoint;
struct urb* interrupt_out_urb; /* single in-flight write URB */
int interrupt_out_interval; /* polling interval actually used (ms) */
int interrupt_out_busy; /* set while the out URB is in flight */
};
/* local function prototypes */
static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos);
static ssize_t tower_write (struct file *file, const char __user *buffer, size_t count, loff_t *ppos);
static inline void tower_delete (struct lego_usb_tower *dev);
static int tower_open (struct inode *inode, struct file *file);
static int tower_release (struct inode *inode, struct file *file);
static unsigned int tower_poll (struct file *file, poll_table *wait);
static loff_t tower_llseek (struct file *file, loff_t off, int whence);
static void tower_abort_transfers (struct lego_usb_tower *dev);
static void tower_check_for_read_packet (struct lego_usb_tower *dev);
static void tower_interrupt_in_callback (struct urb *urb);
static void tower_interrupt_out_callback (struct urb *urb);
static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id);
static void tower_disconnect (struct usb_interface *interface);
/* file operations needed when we register this driver */
static const struct file_operations tower_fops = {
.owner = THIS_MODULE,
.read = tower_read,
.write = tower_write,
.open = tower_open,
.release = tower_release,
.poll = tower_poll,
.llseek = tower_llseek,
};
/*
 * Build the devtmpfs node name ("usb/legousbtowerN") for this device.
 * The returned buffer is kasprintf()-allocated; the driver core frees it.
 */
static char *legousbtower_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
/*
* usb class driver info in order to get a minor number from the usb core,
* and to have the device registered with the driver core
*/
static struct usb_class_driver tower_class = {
.name = "legousbtower%d",
.devnode = legousbtower_devnode,
.fops = &tower_fops,
.minor_base = LEGO_USB_TOWER_MINOR_BASE,
};
/* usb specific object needed to register this driver with the usb subsystem */
static struct usb_driver tower_driver = {
.name = "legousbtower",
.probe = tower_probe,
.disconnect = tower_disconnect,
.id_table = tower_table,
};
/**
* lego_usb_tower_debug_data
*/
static inline void lego_usb_tower_debug_data(struct device *dev,
const char *function, int size,
const unsigned char *data)
{
dev_dbg(dev, "%s - length = %d, data = %*ph\n",
function, size, size, data);
}
/**
 * tower_delete
 *
 * Free the per-device state.  Callers must guarantee nothing else still
 * references @dev: it is called from the final release, from disconnect
 * when the device was never opened, or from the probe error path.
 */
static inline void tower_delete (struct lego_usb_tower *dev)
{
/* stop any in-flight URBs before freeing their buffers */
tower_abort_transfers (dev);
/* free data structures; usb_free_urb() and kfree() both accept NULL,
 * so this is safe for a partially initialized dev from probe */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
kfree (dev->read_buffer);
kfree (dev->interrupt_in_buffer);
kfree (dev->interrupt_out_buffer);
kfree (dev);
}
/**
* tower_open
*/
static int tower_open (struct inode *inode, struct file *file)
{
struct lego_usb_tower *dev = NULL;
int subminor;
int retval = 0;
struct usb_interface *interface;
struct tower_reset_reply reset_reply;
int result;
nonseekable_open(inode, file);
subminor = iminor(inode);
interface = usb_find_interface (&tower_driver, subminor);
if (!interface) {
pr_err("error, can't find device for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
mutex_lock(&open_disc_mutex);
dev = usb_get_intfdata(interface);
if (!dev) {
mutex_unlock(&open_disc_mutex);
retval = -ENODEV;
goto exit;
}
/* lock this device */
if (mutex_lock_interruptible(&dev->lock)) {
mutex_unlock(&open_disc_mutex);
retval = -ERESTARTSYS;
goto exit;
}
/* allow opening only once */
if (dev->open_count) {
mutex_unlock(&open_disc_mutex);
retval = -EBUSY;
goto unlock_exit;
}
dev->open_count = 1;
mutex_unlock(&open_disc_mutex);
/* reset the tower */
result = usb_control_msg (dev->udev,
usb_rcvctrlpipe(dev->udev, 0),
LEGO_USB_TOWER_REQUEST_RESET,
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
0,
0,
&reset_reply,
sizeof(reset_reply),
1000);
if (result < 0) {
dev_err(&dev->udev->dev,
"LEGO USB Tower reset control request failed\n");
retval = result;
goto unlock_exit;
}
/* initialize in direction */
dev->read_buffer_length = 0;
dev->read_packet_length = 0;
usb_fill_int_urb (dev->interrupt_in_urb,
dev->udev,
usb_rcvintpipe(dev->udev, dev->interrupt_in_endpoint->bEndpointAddress),
dev->interrupt_in_buffer,
usb_endpoint_maxp(dev->interrupt_in_endpoint),
tower_interrupt_in_callback,
dev,
dev->interrupt_in_interval);
dev->interrupt_in_running = 1;
dev->interrupt_in_done = 0;
mb();
retval = usb_submit_urb (dev->interrupt_in_urb, GFP_KERNEL);
if (retval) {
dev_err(&dev->udev->dev,
"Couldn't submit interrupt_in_urb %d\n", retval);
dev->interrupt_in_running = 0;
dev->open_count = 0;
goto unlock_exit;
}
/* save device in the file's private structure */
file->private_data = dev;
unlock_exit:
mutex_unlock(&dev->lock);
exit:
return retval;
}
/**
 * tower_release
 *
 * Final close of the device file: waits (bounded, best-effort) for a
 * pending write to drain, kills the URBs and marks the device closed.
 * If the hardware was unplugged while the file was open, the device
 * state is freed here instead of in tower_disconnect().
 */
static int tower_release (struct inode *inode, struct file *file)
{
struct lego_usb_tower *dev;
int retval = 0;
dev = file->private_data;
if (dev == NULL) {
retval = -ENODEV;
goto exit_nolock;
}
/* serialize against tower_open() and tower_disconnect() */
mutex_lock(&open_disc_mutex);
if (mutex_lock_interruptible(&dev->lock)) {
retval = -ERESTARTSYS;
goto exit;
}
if (dev->open_count != 1) {
dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
__func__);
retval = -ENODEV;
goto unlock_exit;
}
if (dev->udev == NULL) {
/* the device was unplugged before the file was released */
/* unlock here as tower_delete frees dev */
mutex_unlock(&dev->lock);
tower_delete (dev);
goto exit;
}
/* wait until write transfer is finished */
if (dev->interrupt_out_busy) {
/* best-effort: give the in-flight write up to 2s to finish */
wait_event_interruptible_timeout (dev->write_wait, !dev->interrupt_out_busy, 2 * HZ);
}
tower_abort_transfers (dev);
dev->open_count = 0;
unlock_exit:
mutex_unlock(&dev->lock);
exit:
mutex_unlock(&open_disc_mutex);
exit_nolock:
return retval;
}
/**
 * tower_abort_transfers
 * aborts transfers and frees associated data structures
 *
 * Synchronously kills any URB still in flight.  Tolerates a NULL @dev
 * and a NULL @dev->udev (device already unplugged), and is safe to
 * call more than once.
 */
static void tower_abort_transfers (struct lego_usb_tower *dev)
{
if (dev == NULL)
return;
/* shutdown transfer */
if (dev->interrupt_in_running) {
dev->interrupt_in_running = 0;
/* make the cleared flag visible before killing the URB so the
 * completion handler does not resubmit it */
mb();
if (dev->udev)
usb_kill_urb (dev->interrupt_in_urb);
}
if (dev->interrupt_out_busy && dev->udev)
usb_kill_urb(dev->interrupt_out_urb);
}
/**
 * tower_check_for_read_packet
 *
 * Packetizing heuristic: buffered bytes are only promoted to a
 * readable "packet" (read_packet_length) once the buffer has been
 * unchanged for packet_timeout_jiffies, once it is completely full,
 * or immediately when packetizing is disabled (packet_timeout == 0).
 * This gives correct semantics for signals and non-blocking I/O.
 */
static void tower_check_for_read_packet (struct lego_usb_tower *dev)
{
	int packet_ready;

	spin_lock_irq(&dev->read_buffer_lock);

	packet_ready = !packet_timeout;
	if (!packet_ready)
		packet_ready = time_after(jiffies,
					  dev->read_last_arrival + dev->packet_timeout_jiffies);
	if (!packet_ready)
		packet_ready = (dev->read_buffer_length == read_buffer_size);

	if (packet_ready)
		dev->read_packet_length = dev->read_buffer_length;

	/* consume the completion flag so readers sleep again if needed */
	dev->interrupt_in_done = 0;

	spin_unlock_irq(&dev->read_buffer_lock);
}
/**
 * tower_poll
 *
 * poll()/select() support: reports readable once a complete packet is
 * available and writable whenever no write URB is in flight.  A device
 * that has been unplugged reports POLLERR | POLLHUP immediately.
 */
static unsigned int tower_poll (struct file *file, poll_table *wait)
{
	struct lego_usb_tower *dev = file->private_data;
	unsigned int events = 0;

	if (dev->udev == NULL)
		return POLLERR | POLLHUP;

	poll_wait(file, &dev->read_wait, wait);
	poll_wait(file, &dev->write_wait, wait);

	/* promote buffered data to a readable packet before testing */
	tower_check_for_read_packet(dev);

	if (dev->read_packet_length > 0)
		events |= POLLIN | POLLRDNORM;
	if (!dev->interrupt_out_busy)
		events |= POLLOUT | POLLWRNORM;

	return events;
}
/**
 * tower_llseek
 *
 * The tower is a byte stream, so seeking is meaningless; every seek
 * request is refused with -ESPIPE (same convention as pipes).
 */
static loff_t tower_llseek (struct file *file, loff_t off, int whence)
{
return -ESPIPE; /* unseekable */
}
/**
 * tower_read
 *
 * read() entry point.  Blocks until a complete packet is available (see
 * tower_check_for_read_packet()), honours O_NONBLOCK with -EAGAIN, and
 * enforces the optional read_timeout (module parameter) with
 * -ETIMEDOUT.  Returns up to min(count, packet length) bytes and
 * shifts any remainder to the front of the buffer.
 */
static ssize_t tower_read (struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
struct lego_usb_tower *dev;
size_t bytes_to_read;
int i;
int retval = 0;
unsigned long timeout = 0;
dev = file->private_data;
/* lock this object */
if (mutex_lock_interruptible(&dev->lock)) {
retval = -ERESTARTSYS;
goto exit;
}
/* verify that the device wasn't unplugged */
if (dev->udev == NULL) {
retval = -ENODEV;
pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
}
/* verify that we actually have some data to read */
if (count == 0) {
dev_dbg(&dev->udev->dev, "read request of 0 bytes\n");
goto unlock_exit;
}
if (read_timeout) {
timeout = jiffies + msecs_to_jiffies(read_timeout);
}
/* wait for data */
tower_check_for_read_packet (dev);
while (dev->read_packet_length == 0) {
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto unlock_exit;
}
/* sleep until the in-URB completes or the packet timeout elapses;
 * NOTE(review): with packet_timeout == 0 this wait has a zero
 * timeout and the loop effectively polls — confirm intended */
retval = wait_event_interruptible_timeout(dev->read_wait, dev->interrupt_in_done, dev->packet_timeout_jiffies);
if (retval < 0) {
goto unlock_exit;
}
/* reset read timeout during read or write activity */
if (read_timeout
&& (dev->read_buffer_length || dev->interrupt_out_busy)) {
timeout = jiffies + msecs_to_jiffies(read_timeout);
}
/* check for read timeout */
if (read_timeout && time_after (jiffies, timeout)) {
retval = -ETIMEDOUT;
goto unlock_exit;
}
tower_check_for_read_packet (dev);
}
/* copy the data from read_buffer into userspace */
bytes_to_read = min(count, dev->read_packet_length);
if (copy_to_user (buffer, dev->read_buffer, bytes_to_read)) {
retval = -EFAULT;
goto unlock_exit;
}
/* drop the consumed bytes and compact the buffer; the spinlock keeps
 * the interrupt-in completion handler from appending concurrently */
spin_lock_irq (&dev->read_buffer_lock);
dev->read_buffer_length -= bytes_to_read;
dev->read_packet_length -= bytes_to_read;
for (i=0; i<dev->read_buffer_length; i++) {
dev->read_buffer[i] = dev->read_buffer[i+bytes_to_read];
}
spin_unlock_irq (&dev->read_buffer_lock);
retval = bytes_to_read;
unlock_exit:
/* unlock the device */
mutex_unlock(&dev->lock);
exit:
return retval;
}
/**
 * tower_write
 *
 * write() entry point.  Waits for any previous write URB to finish
 * (-EAGAIN for O_NONBLOCK), copies up to write_buffer_size bytes from
 * userspace and submits them on the interrupt-out endpoint.  Returns
 * the number of bytes queued (a short write is possible).
 *
 * Fixes vs. previous revision:
 *  - the size comparison is done in size_t; min_t(int, ...) could
 *    truncate a huge count to a negative int;
 *  - the debug format uses the standard %zu instead of the removed,
 *    non-standard %Zd (which was also signed for a size_t argument).
 */
static ssize_t tower_write (struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
	struct lego_usb_tower *dev;
	size_t bytes_to_write;
	int retval = 0;

	dev = file->private_data;

	/* lock this object */
	if (mutex_lock_interruptible(&dev->lock)) {
		retval = -ERESTARTSYS;
		goto exit;
	}

	/* verify that the device wasn't unplugged */
	if (dev->udev == NULL) {
		retval = -ENODEV;
		pr_err("No device or device unplugged %d\n", retval);
		goto unlock_exit;
	}

	/* verify that we actually have some data to write */
	if (count == 0) {
		dev_dbg(&dev->udev->dev, "write request of 0 bytes\n");
		goto unlock_exit;
	}

	/* wait until previous transfer is finished */
	while (dev->interrupt_out_busy) {
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto unlock_exit;
		}
		retval = wait_event_interruptible(dev->write_wait,
						  !dev->interrupt_out_busy);
		if (retval) {
			goto unlock_exit;
		}
	}

	/* write the data into interrupt_out_buffer from userspace;
	 * write_buffer_size is a positive module parameter, so the
	 * size_t comparison cannot go wrong */
	bytes_to_write = min_t(size_t, count, write_buffer_size);
	dev_dbg(&dev->udev->dev, "%s: count = %zu, bytes_to_write = %zu\n",
		__func__, count, bytes_to_write);

	if (copy_from_user (dev->interrupt_out_buffer, buffer, bytes_to_write)) {
		retval = -EFAULT;
		goto unlock_exit;
	}

	/* send off the urb */
	usb_fill_int_urb(dev->interrupt_out_urb,
			 dev->udev,
			 usb_sndintpipe(dev->udev, dev->interrupt_out_endpoint->bEndpointAddress),
			 dev->interrupt_out_buffer,
			 bytes_to_write,
			 tower_interrupt_out_callback,
			 dev,
			 dev->interrupt_out_interval);
	dev->interrupt_out_busy = 1;
	wmb();	/* publish the busy flag before the URB can complete */

	retval = usb_submit_urb (dev->interrupt_out_urb, GFP_KERNEL);
	if (retval) {
		dev->interrupt_out_busy = 0;
		dev_err(&dev->udev->dev,
			"Couldn't submit interrupt_out_urb %d\n", retval);
		goto unlock_exit;
	}
	retval = bytes_to_write;

unlock_exit:
	/* unlock the device */
	mutex_unlock(&dev->lock);

exit:
	return retval;
}
/**
 * tower_interrupt_in_callback
 *
 * Completion handler for the continuously running interrupt-in URB
 * (runs in interrupt context).  Appends received bytes to read_buffer,
 * resubmits itself while interrupt_in_running is set, and wakes any
 * sleeping reader.
 */
static void tower_interrupt_in_callback (struct urb *urb)
{
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
int retval;
lego_usb_tower_debug_data(&dev->udev->dev, __func__,
urb->actual_length, urb->transfer_buffer);
if (status) {
if (status == -ENOENT ||
status == -ECONNRESET ||
status == -ESHUTDOWN) {
/* URB was unlinked or device gone: do not resubmit */
goto exit;
} else {
dev_dbg(&dev->udev->dev,
"%s: nonzero status received: %d\n", __func__,
status);
goto resubmit; /* maybe we can recover */
}
}
if (urb->actual_length > 0) {
spin_lock (&dev->read_buffer_lock);
if (dev->read_buffer_length + urb->actual_length < read_buffer_size) {
memcpy (dev->read_buffer + dev->read_buffer_length,
dev->interrupt_in_buffer,
urb->actual_length);
dev->read_buffer_length += urb->actual_length;
/* timestamp consumed by the packet-timeout heuristic */
dev->read_last_arrival = jiffies;
dev_dbg(&dev->udev->dev, "%s: received %d bytes\n",
__func__, urb->actual_length);
} else {
/* buffer full: drop the data rather than overrun */
pr_warn("read_buffer overflow, %d bytes dropped\n",
urb->actual_length);
}
spin_unlock (&dev->read_buffer_lock);
}
resubmit:
/* resubmit if we're still running */
if (dev->interrupt_in_running && dev->udev) {
retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
if (retval)
dev_err(&dev->udev->dev,
"%s: usb_submit_urb failed (%d)\n",
__func__, retval);
}
exit:
dev->interrupt_in_done = 1;
wake_up_interruptible (&dev->read_wait);
}
/**
 * tower_interrupt_out_callback
 *
 * Completion handler for the write URB (interrupt context): logs any
 * unexpected status, clears the busy flag and wakes sleeping writers.
 */
static void tower_interrupt_out_callback (struct urb *urb)
{
	struct lego_usb_tower *dev = urb->context;
	int status = urb->status;
	int expected;

	lego_usb_tower_debug_data(&dev->udev->dev, __func__,
				  urb->actual_length, urb->transfer_buffer);

	/* sync/async unlink faults aren't errors */
	expected = (status == 0 ||
		    status == -ENOENT ||
		    status == -ECONNRESET ||
		    status == -ESHUTDOWN);
	if (!expected)
		dev_dbg(&dev->udev->dev,
			"%s: nonzero write bulk status received: %d\n", __func__,
			status);

	/* mark the out pipe idle and wake any sleeping writer */
	dev->interrupt_out_busy = 0;
	wake_up_interruptible(&dev->write_wait);
}
/**
 * tower_probe
 *
 * Called by the usb core when a new device is connected that it thinks
 * this driver might be interested in.
 *
 * Fixes vs. previous revision:
 *  - the get-version reply is heap-allocated: usb_control_msg()
 *    buffers must be DMA-able and on-stack buffers are not;
 *  - a short version reply is now treated as -EIO instead of logging
 *    (and thus leaking) uninitialized buffer bytes;
 *  - the firmware version is queried before usb_register_dev(), so a
 *    failing version request can no longer leave a registered minor
 *    pointing at freed device state.
 */
static int tower_probe (struct usb_interface *interface, const struct usb_device_id *id)
{
	struct device *idev = &interface->dev;
	struct usb_device *udev = interface_to_usbdev(interface);
	struct lego_usb_tower *dev = NULL;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	struct tower_get_version_reply *get_version_reply = NULL;
	int i;
	int retval = -ENOMEM;
	int result;

	/* allocate memory for our device state and initialize it */
	dev = kmalloc (sizeof(struct lego_usb_tower), GFP_KERNEL);
	if (!dev)
		goto exit;

	mutex_init(&dev->lock);
	dev->udev = udev;
	dev->open_count = 0;
	dev->read_buffer = NULL;
	dev->read_buffer_length = 0;
	dev->read_packet_length = 0;
	spin_lock_init (&dev->read_buffer_lock);
	dev->packet_timeout_jiffies = msecs_to_jiffies(packet_timeout);
	dev->read_last_arrival = jiffies;
	init_waitqueue_head (&dev->read_wait);
	init_waitqueue_head (&dev->write_wait);
	dev->interrupt_in_buffer = NULL;
	dev->interrupt_in_endpoint = NULL;
	dev->interrupt_in_urb = NULL;
	dev->interrupt_in_running = 0;
	dev->interrupt_in_done = 0;
	dev->interrupt_out_buffer = NULL;
	dev->interrupt_out_endpoint = NULL;
	dev->interrupt_out_urb = NULL;
	dev->interrupt_out_busy = 0;

	iface_desc = interface->cur_altsetting;

	/* set up the endpoint information */
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;
		if (usb_endpoint_xfer_int(endpoint)) {
			if (usb_endpoint_dir_in(endpoint))
				dev->interrupt_in_endpoint = endpoint;
			else
				dev->interrupt_out_endpoint = endpoint;
		}
	}
	/* both interrupt endpoints are required; bail out on malformed
	 * descriptors instead of dereferencing NULL later */
	if (dev->interrupt_in_endpoint == NULL) {
		dev_err(idev, "interrupt in endpoint not found\n");
		goto error;
	}
	if (dev->interrupt_out_endpoint == NULL) {
		dev_err(idev, "interrupt out endpoint not found\n");
		goto error;
	}

	dev->read_buffer = kmalloc (read_buffer_size, GFP_KERNEL);
	if (!dev->read_buffer)
		goto error;
	dev->interrupt_in_buffer = kmalloc (usb_endpoint_maxp(dev->interrupt_in_endpoint), GFP_KERNEL);
	if (!dev->interrupt_in_buffer)
		goto error;
	dev->interrupt_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->interrupt_in_urb)
		goto error;
	dev->interrupt_out_buffer = kmalloc (write_buffer_size, GFP_KERNEL);
	if (!dev->interrupt_out_buffer)
		goto error;
	dev->interrupt_out_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->interrupt_out_urb)
		goto error;
	dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
	dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;

	/* get the firmware version and log it (heap buffer: must be
	 * DMA-able for usb_control_msg) */
	get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
	if (!get_version_reply) {
		retval = -ENOMEM;
		goto error;
	}
	result = usb_control_msg (udev,
				  usb_rcvctrlpipe(udev, 0),
				  LEGO_USB_TOWER_REQUEST_GET_VERSION,
				  USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
				  0,
				  0,
				  get_version_reply,
				  sizeof(*get_version_reply),
				  1000);
	if (result != sizeof(*get_version_reply)) {
		/* a short transfer would leave the reply partly
		 * uninitialized; treat it as an I/O error */
		if (result >= 0)
			result = -EIO;
		dev_err(idev, "LEGO USB Tower get version control request failed\n");
		retval = result;
		goto error;
	}
	dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d "
		 "build %d\n", get_version_reply->major,
		 get_version_reply->minor,
		 le16_to_cpu(get_version_reply->build_no));
	kfree(get_version_reply);
	get_version_reply = NULL;

	/* we can register the device now, as it is ready */
	usb_set_intfdata (interface, dev);
	retval = usb_register_dev (interface, &tower_class);
	if (retval) {
		/* something prevented us from registering this driver */
		dev_err(idev, "Not able to get a minor for this device.\n");
		usb_set_intfdata (interface, NULL);
		goto error;
	}
	dev->minor = interface->minor;

	/* let the user know what node this device is now attached to */
	dev_info(&interface->dev, "LEGO USB Tower #%d now attached to major "
		 "%d minor %d\n", (dev->minor - LEGO_USB_TOWER_MINOR_BASE),
		 USB_MAJOR, dev->minor);

exit:
	return retval;

error:
	kfree(get_version_reply);
	tower_delete(dev);
	return retval;
}
/**
 * tower_disconnect
 *
 * Called by the usb core when the device is removed from the system.
 *
 * Returns the minor immediately.  The device state is freed here only
 * when the file is not open; otherwise dev->udev is cleared and the
 * final tower_release() performs the cleanup.
 */
static void tower_disconnect (struct usb_interface *interface)
{
struct lego_usb_tower *dev;
int minor;
dev = usb_get_intfdata (interface);
/* serialize against tower_open() and tower_release() */
mutex_lock(&open_disc_mutex);
usb_set_intfdata (interface, NULL);
/* remember the minor for the log message; dev may be freed below */
minor = dev->minor;
/* give back our minor */
usb_deregister_dev (interface, &tower_class);
mutex_lock(&dev->lock);
mutex_unlock(&open_disc_mutex);
/* if the device is not opened, then we clean up right now */
if (!dev->open_count) {
mutex_unlock(&dev->lock);
tower_delete (dev);
} else {
/* still open: flag the unplug and let release() clean up */
dev->udev = NULL;
/* wake up pollers */
wake_up_interruptible_all(&dev->read_wait);
wake_up_interruptible_all(&dev->write_wait);
mutex_unlock(&dev->lock);
}
dev_info(&interface->dev, "LEGO USB Tower #%d now disconnected\n",
(minor - LEGO_USB_TOWER_MINOR_BASE));
}
module_usb_driver(tower_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
#ifdef MODULE_LICENSE
MODULE_LICENSE("GPL");
#endif
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_2835_0 |
crossvul-cpp_data_bad_3958_0 | // SPDX-License-Identifier: GPL-2.0-or-later
/*
* OV519 driver
*
* Copyright (C) 2008-2011 Jean-François Moine <moinejf@free.fr>
* Copyright (C) 2009 Hans de Goede <hdegoede@redhat.com>
*
* This module is adapted from the ov51x-jpeg package, which itself
* was adapted from the ov511 driver.
*
* Original copyright for the ov511 driver is:
*
* Copyright (c) 1999-2006 Mark W. McClelland
* Support for OV519, OV8610 Copyright (c) 2003 Joerg Heckenbach
* Many improvements by Bret Wallach <bwallac1@san.rr.com>
* Color fixes by by Orion Sky Lawlor <olawlor@acm.org> (2/26/2000)
* OV7620 fixes by Charl P. Botha <cpbotha@ieee.org>
* Changes by Claudio Matsuoka <claudio@conectiva.com>
*
* ov51x-jpeg original copyright is:
*
* Copyright (c) 2004-2007 Romain Beauxis <toots@rastageeks.org>
* Support for OV7670 sensors was contributed by Sam Skipsey <aoanla@yahoo.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "ov519"
#include <linux/input.h>
#include "gspca.h"
/* The jpeg_hdr is used by w996Xcf only */
/* The CONEX_CAM define for jpeg.h needs renaming, now its used here too */
#define CONEX_CAM
#include "jpeg.h"
MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>");
MODULE_DESCRIPTION("OV519 USB Camera Driver");
MODULE_LICENSE("GPL");
/* global parameters */
static int frame_rate;
/* Number of times to retry a failed I2C transaction. Increase this if you
* are getting "Failed to read sensor ID..." */
static int i2c_detect_tries = 10;
/* ov519 device descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
struct v4l2_ctrl *jpegqual;
struct v4l2_ctrl *freq;
struct { /* h/vflip control cluster */
struct v4l2_ctrl *hflip;
struct v4l2_ctrl *vflip;
};
struct { /* autobrightness/brightness control cluster */
struct v4l2_ctrl *autobright;
struct v4l2_ctrl *brightness;
};
u8 revision; /* bridge revision -- TODO confirm against probe code */
u8 packet_nr; /* presumably the isoc packet sequence counter; verify */
char bridge; /* one of the BRIDGE_* values below (low 3 bits) */
#define BRIDGE_OV511 0
#define BRIDGE_OV511PLUS 1
#define BRIDGE_OV518 2
#define BRIDGE_OV518PLUS 3
#define BRIDGE_OV519 4 /* = ov530 */
#define BRIDGE_OVFX2 5
#define BRIDGE_W9968CF 6
#define BRIDGE_MASK 7
char invert_led; /* non-zero when the LED polarity is inverted */
#define BRIDGE_INVERT_LED 8
char snapshot_pressed;
char snapshot_needs_reset;
/* Determined by sensor type */
u8 sif;
#define QUALITY_MIN 50
#define QUALITY_MAX 70
#define QUALITY_DEF 50
u8 stopped; /* Streaming is temporarily paused */
u8 first_frame;
u8 frame_rate; /* current Framerate */
u8 clockdiv; /* clockdiv override */
s8 sensor; /* Type of image sensor chip (SEN_*) */
u8 sensor_addr; /* presumably the sensor's I2C address; verify */
u16 sensor_width;
u16 sensor_height;
s16 sensor_reg_cache[256]; /* cached sensor register values (-1 = unknown?) -- TODO confirm */
u8 jpeg_hdr[JPEG_HDR_SZ]; /* prebuilt JPEG header (used by w996Xcf) */
};
enum sensors {
SEN_OV2610,
SEN_OV2610AE,
SEN_OV3610,
SEN_OV6620,
SEN_OV6630,
SEN_OV66308AF,
SEN_OV7610,
SEN_OV7620,
SEN_OV7620AE,
SEN_OV7640,
SEN_OV7648,
SEN_OV7660,
SEN_OV7670,
SEN_OV76BE,
SEN_OV8610,
SEN_OV9600,
};
/* Note this is a bit of a hack, but the w9968cf driver needs the code for all
the ov sensors which is already present here. When we have the time we
really should move the sensor drivers to v4l2 sub drivers. */
#include "w996Xcf.c"
/* table of the disabled controls */
/* One bit per V4L2 control; valid_controls[] below marks which controls
 * each sensor type actually supports. */
struct ctrl_valid {
unsigned int has_brightness:1;
unsigned int has_contrast:1;
unsigned int has_exposure:1;
unsigned int has_autogain:1;
unsigned int has_sat:1;
unsigned int has_hvflip:1;
unsigned int has_autobright:1;
unsigned int has_freq:1;
};
static const struct ctrl_valid valid_controls[] = {
[SEN_OV2610] = {
.has_exposure = 1,
.has_autogain = 1,
},
[SEN_OV2610AE] = {
.has_exposure = 1,
.has_autogain = 1,
},
[SEN_OV3610] = {
/* No controls */
},
[SEN_OV6620] = {
.has_brightness = 1,
.has_contrast = 1,
.has_sat = 1,
.has_autobright = 1,
.has_freq = 1,
},
[SEN_OV6630] = {
.has_brightness = 1,
.has_contrast = 1,
.has_sat = 1,
.has_autobright = 1,
.has_freq = 1,
},
[SEN_OV66308AF] = {
.has_brightness = 1,
.has_contrast = 1,
.has_sat = 1,
.has_autobright = 1,
.has_freq = 1,
},
[SEN_OV7610] = {
.has_brightness = 1,
.has_contrast = 1,
.has_sat = 1,
.has_autobright = 1,
.has_freq = 1,
},
[SEN_OV7620] = {
.has_brightness = 1,
.has_contrast = 1,
.has_sat = 1,
.has_autobright = 1,
.has_freq = 1,
},
[SEN_OV7620AE] = {
.has_brightness = 1,
.has_contrast = 1,
.has_sat = 1,
.has_autobright = 1,
.has_freq = 1,
},
[SEN_OV7640] = {
.has_brightness = 1,
.has_sat = 1,
.has_freq = 1,
},
[SEN_OV7648] = {
.has_brightness = 1,
.has_sat = 1,
.has_freq = 1,
},
[SEN_OV7660] = {
.has_brightness = 1,
.has_contrast = 1,
.has_sat = 1,
.has_hvflip = 1,
.has_freq = 1,
},
[SEN_OV7670] = {
.has_brightness = 1,
.has_contrast = 1,
.has_hvflip = 1,
.has_freq = 1,
},
[SEN_OV76BE] = {
.has_brightness = 1,
.has_contrast = 1,
.has_sat = 1,
.has_autobright = 1,
.has_freq = 1,
},
[SEN_OV8610] = {
.has_brightness = 1,
.has_contrast = 1,
.has_sat = 1,
.has_autobright = 1,
},
[SEN_OV9600] = {
.has_exposure = 1,
.has_autogain = 1,
},
};
static const struct v4l2_pix_format ov519_vga_mode[] = {
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
{640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
static const struct v4l2_pix_format ov519_sif_mode[] = {
{160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 160,
.sizeimage = 160 * 120 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 3},
{176, 144, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 176 * 144 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
{320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 2},
{352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE,
.bytesperline = 352,
.sizeimage = 352 * 288 * 3 / 8 + 590,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
/* Note some of the sizeimage values for the ov511 / ov518 may seem
larger then necessary, however they need to be this big as the ov511 /
ov518 always fills the entire isoc frame, using 0 padding bytes when
it doesn't have any data. So with low framerates the amount of data
transferred can become quite large (libv4l will remove all the 0 padding
in userspace). */
static const struct v4l2_pix_format ov518_vga_mode[] = {
{320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
{640, 480, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480 * 2,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
static const struct v4l2_pix_format ov518_sif_mode[] = {
{160, 120, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 160,
.sizeimage = 70000,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 3},
{176, 144, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 70000,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
{320, 240, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 2},
{352, 288, V4L2_PIX_FMT_OV518, V4L2_FIELD_NONE,
.bytesperline = 352,
.sizeimage = 352 * 288 * 3,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
static const struct v4l2_pix_format ov511_vga_mode[] = {
{320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
{640, 480, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480 * 2,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
static const struct v4l2_pix_format ov511_sif_mode[] = {
{160, 120, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
.bytesperline = 160,
.sizeimage = 70000,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 3},
{176, 144, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 70000,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 1},
{320, 240, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 2},
{352, 288, V4L2_PIX_FMT_OV511, V4L2_FIELD_NONE,
.bytesperline = 352,
.sizeimage = 352 * 288 * 3,
.colorspace = V4L2_COLORSPACE_JPEG,
.priv = 0},
};
static const struct v4l2_pix_format ovfx2_ov2610_mode[] = {
{800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 800,
.sizeimage = 800 * 600,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{1600, 1200, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 1600,
.sizeimage = 1600 * 1200,
.colorspace = V4L2_COLORSPACE_SRGB},
};
static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
{640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 800,
.sizeimage = 800 * 600,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 1024,
.sizeimage = 1024 * 768,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{1600, 1200, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 1600,
.sizeimage = 1600 * 1200,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
{2048, 1536, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
.bytesperline = 2048,
.sizeimage = 2048 * 1536,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
};
/*
 * Frame formats for the OVFX2 bridge with an OV9600 sensor.
 * Raw Bayer (BGGR), one byte per pixel.
 */
static const struct v4l2_pix_format ovfx2_ov9600_mode[] = {
	{
		.width        = 640,
		.height       = 480,
		.pixelformat  = V4L2_PIX_FMT_SBGGR8,
		.field        = V4L2_FIELD_NONE,
		.bytesperline = 640,
		.sizeimage    = 640 * 480,
		.colorspace   = V4L2_COLORSPACE_SRGB,
		.priv         = 1,
	},
	{
		.width        = 1280,
		.height       = 1024,
		.pixelformat  = V4L2_PIX_FMT_SBGGR8,
		.field        = V4L2_FIELD_NONE,
		.bytesperline = 1280,
		.sizeimage    = 1280 * 1024,
		.colorspace   = V4L2_COLORSPACE_SRGB,
		.priv         = 0,
	},
};
/*
 * Bridge-chip register addresses (written over USB control transfers)
 * followed by sensor register addresses (written over the bridge's I2C
 * interface). Values are taken from vendor documentation and USB traces.
 */
/* Registers common to OV511 / OV518 */
#define R51x_FIFO_PSIZE 0x30 /* 2 bytes wide w/ OV518(+) */
#define R51x_SYS_RESET 0x50
/* Reset type flags */
#define OV511_RESET_OMNICE 0x08
#define R51x_SYS_INIT 0x53
#define R51x_SYS_SNAP 0x52
#define R51x_SYS_CUST_ID 0x5f
#define R51x_COMP_LUT_BEGIN 0x80
/* OV511 Camera interface register numbers */
#define R511_CAM_DELAY 0x10
#define R511_CAM_EDGE 0x11
#define R511_CAM_PXCNT 0x12
#define R511_CAM_LNCNT 0x13
#define R511_CAM_PXDIV 0x14
#define R511_CAM_LNDIV 0x15
#define R511_CAM_UV_EN 0x16
#define R511_CAM_LINE_MODE 0x17
#define R511_CAM_OPTS 0x18
#define R511_SNAP_FRAME 0x19
#define R511_SNAP_PXCNT 0x1a
#define R511_SNAP_LNCNT 0x1b
#define R511_SNAP_PXDIV 0x1c
#define R511_SNAP_LNDIV 0x1d
#define R511_SNAP_UV_EN 0x1e
#define R511_SNAP_OPTS 0x1f
#define R511_DRAM_FLOW_CTL 0x20
#define R511_FIFO_OPTS 0x31
#define R511_I2C_CTL 0x40
#define R511_SYS_LED_CTL 0x55 /* OV511+ only */
#define R511_COMP_EN 0x78
#define R511_COMP_LUT_EN 0x79
/* OV518 Camera interface register numbers */
#define R518_GPIO_OUT 0x56 /* OV518(+) only */
#define R518_GPIO_CTL 0x57 /* OV518(+) only */
/* OV519 Camera interface register numbers */
#define OV519_R10_H_SIZE 0x10
#define OV519_R11_V_SIZE 0x11
#define OV519_R12_X_OFFSETL 0x12
#define OV519_R13_X_OFFSETH 0x13
#define OV519_R14_Y_OFFSETL 0x14
#define OV519_R15_Y_OFFSETH 0x15
#define OV519_R16_DIVIDER 0x16
#define OV519_R20_DFR 0x20
#define OV519_R25_FORMAT 0x25
/* OV519 System Controller register numbers */
#define OV519_R51_RESET1 0x51
#define OV519_R54_EN_CLK1 0x54
#define OV519_R57_SNAPSHOT 0x57
#define OV519_GPIO_DATA_OUT0 0x71
#define OV519_GPIO_IO_CTRL0 0x72
/*#define OV511_ENDPOINT_ADDRESS 1 * Isoc endpoint number */
/*
 * The FX2 chip does not give us a zero length read at end of frame.
 * It does, however, give a short read at the end of a frame, if
 * necessary, rather than run two frames together.
 *
 * By choosing the right bulk transfer size, we are guaranteed to always
 * get a short read for the last read of each frame. Frame sizes are
 * always a composite number (width * height, or a multiple) so if we
 * choose a prime number, we are guaranteed that the last read of a
 * frame will be short.
 *
 * But it isn't that easy: the 2.6 kernel requires a multiple of 4KB,
 * otherwise EOVERFLOW "babbling" errors occur. I have not been able
 * to figure out why. [PMiller]
 *
 * The constant (13 * 4096) is the largest "prime enough" number less than 64KB.
 *
 * It isn't enough to know the number of bytes per frame, in case we
 * have data dropouts or buffer overruns (even though the FX2 double
 * buffers, there are some pretty strict real time constraints for
 * isochronous transfer for larger frame sizes).
 */
/*jfm: this value does not work for 800x600 - see isoc_init */
#define OVFX2_BULK_SIZE (13 * 4096)
/* I2C registers */
#define R51x_I2C_W_SID 0x41
#define R51x_I2C_SADDR_3 0x42
#define R51x_I2C_SADDR_2 0x43
#define R51x_I2C_R_SID 0x44
#define R51x_I2C_DATA 0x45
#define R518_I2C_CTL 0x47 /* OV518(+) only */
#define OVFX2_I2C_ADDR 0x00
/* I2C ADDRESSES */
#define OV7xx0_SID 0x42
#define OV_HIRES_SID 0x60 /* OV9xxx / OV2xxx / OV3xxx */
#define OV8xx0_SID 0xa0
#define OV6xx0_SID 0xc0
/* OV7610 registers */
#define OV7610_REG_GAIN 0x00 /* gain setting (5:0) */
#define OV7610_REG_BLUE 0x01 /* blue channel balance */
#define OV7610_REG_RED 0x02 /* red channel balance */
#define OV7610_REG_SAT 0x03 /* saturation */
#define OV8610_REG_HUE 0x04 /* 04 reserved */
#define OV7610_REG_CNT 0x05 /* Y contrast */
#define OV7610_REG_BRT 0x06 /* Y brightness */
#define OV7610_REG_COM_C 0x14 /* misc common regs */
#define OV7610_REG_ID_HIGH 0x1c /* manufacturer ID MSB */
#define OV7610_REG_ID_LOW 0x1d /* manufacturer ID LSB */
#define OV7610_REG_COM_I 0x29 /* misc settings */
/* OV7660 and OV7670 registers */
#define OV7670_R00_GAIN 0x00 /* Gain lower 8 bits (rest in vref) */
#define OV7670_R01_BLUE 0x01 /* blue gain */
#define OV7670_R02_RED 0x02 /* red gain */
#define OV7670_R03_VREF 0x03 /* Pieces of GAIN, VSTART, VSTOP */
#define OV7670_R04_COM1 0x04 /* Control 1 */
/*#define OV7670_R07_AECHH 0x07 * AEC MS 5 bits */
#define OV7670_R0C_COM3 0x0c /* Control 3 */
#define OV7670_R0D_COM4 0x0d /* Control 4 */
#define OV7670_R0E_COM5 0x0e /* All "reserved" */
#define OV7670_R0F_COM6 0x0f /* Control 6 */
#define OV7670_R10_AECH 0x10 /* More bits of AEC value */
#define OV7670_R11_CLKRC 0x11 /* Clock control */
#define OV7670_R12_COM7 0x12 /* Control 7 */
#define OV7670_COM7_FMT_VGA 0x00
/*#define OV7670_COM7_YUV 0x00 * YUV */
#define OV7670_COM7_FMT_QVGA 0x10 /* QVGA format */
#define OV7670_COM7_FMT_MASK 0x38
#define OV7670_COM7_RESET 0x80 /* Register reset */
#define OV7670_R13_COM8 0x13 /* Control 8 */
#define OV7670_COM8_AEC 0x01 /* Auto exposure enable */
#define OV7670_COM8_AWB 0x02 /* White balance enable */
#define OV7670_COM8_AGC 0x04 /* Auto gain enable */
#define OV7670_COM8_BFILT 0x20 /* Band filter enable */
#define OV7670_COM8_AECSTEP 0x40 /* Unlimited AEC step size */
#define OV7670_COM8_FASTAEC 0x80 /* Enable fast AGC/AEC */
#define OV7670_R14_COM9 0x14 /* Control 9 - gain ceiling */
#define OV7670_R15_COM10 0x15 /* Control 10 */
#define OV7670_R17_HSTART 0x17 /* Horiz start high bits */
#define OV7670_R18_HSTOP 0x18 /* Horiz stop high bits */
#define OV7670_R19_VSTART 0x19 /* Vert start high bits */
#define OV7670_R1A_VSTOP 0x1a /* Vert stop high bits */
#define OV7670_R1E_MVFP 0x1e /* Mirror / vflip */
#define OV7670_MVFP_VFLIP 0x10 /* vertical flip */
#define OV7670_MVFP_MIRROR 0x20 /* Mirror image */
#define OV7670_R24_AEW 0x24 /* AGC upper limit */
#define OV7670_R25_AEB 0x25 /* AGC lower limit */
#define OV7670_R26_VPT 0x26 /* AGC/AEC fast mode op region */
#define OV7670_R32_HREF 0x32 /* HREF pieces */
#define OV7670_R3A_TSLB 0x3a /* lots of stuff */
#define OV7670_R3B_COM11 0x3b /* Control 11 */
#define OV7670_COM11_EXP 0x02
#define OV7670_COM11_HZAUTO 0x10 /* Auto detect 50/60 Hz */
#define OV7670_R3C_COM12 0x3c /* Control 12 */
#define OV7670_R3D_COM13 0x3d /* Control 13 */
#define OV7670_COM13_GAMMA 0x80 /* Gamma enable */
#define OV7670_COM13_UVSAT 0x40 /* UV saturation auto adjustment */
#define OV7670_R3E_COM14 0x3e /* Control 14 */
#define OV7670_R3F_EDGE 0x3f /* Edge enhancement factor */
#define OV7670_R40_COM15 0x40 /* Control 15 */
/*#define OV7670_COM15_R00FF 0xc0 * 00 to FF */
#define OV7670_R41_COM16 0x41 /* Control 16 */
#define OV7670_COM16_AWBGAIN 0x08 /* AWB gain enable */
/* end of ov7660 common registers */
#define OV7670_R55_BRIGHT 0x55 /* Brightness */
#define OV7670_R56_CONTRAS 0x56 /* Contrast control */
#define OV7670_R69_GFIX 0x69 /* Fix gain control */
/*#define OV7670_R8C_RGB444 0x8c * RGB 444 control */
#define OV7670_R9F_HAECC1 0x9f /* Hist AEC/AGC control 1 */
#define OV7670_RA0_HAECC2 0xa0 /* Hist AEC/AGC control 2 */
#define OV7670_RA5_BD50MAX 0xa5 /* 50hz banding step limit */
#define OV7670_RA6_HAECC3 0xa6 /* Hist AEC/AGC control 3 */
#define OV7670_RA7_HAECC4 0xa7 /* Hist AEC/AGC control 4 */
#define OV7670_RA8_HAECC5 0xa8 /* Hist AEC/AGC control 5 */
#define OV7670_RA9_HAECC6 0xa9 /* Hist AEC/AGC control 6 */
#define OV7670_RAA_HAECC7 0xaa /* Hist AEC/AGC control 7 */
#define OV7670_RAB_BD60MAX 0xab /* 60hz banding step limit */
/* One bridge-register write: address/value pair sent over USB control. */
struct ov_regvals {
u8 reg; /* bridge register address */
u8 val; /* value to write */
};
/*
 * One sensor-register write via the bridge's I2C interface.
 * Same layout as ov_regvals; kept as a distinct type so tables
 * document whether they target the bridge or the sensor.
 */
struct ov_i2c_regvals {
u8 reg; /* sensor register address */
u8 val; /* value to write */
};
/* Settings for OV2610 camera chip: only a soft reset, defaults are used */
static const struct ov_i2c_regvals norm_2610[] = {
{ 0x12, 0x80 }, /* reset */
};
/* Settings for the OV2610AE variant; values partly from ms-win USB traces */
static const struct ov_i2c_regvals norm_2610ae[] = {
{0x12, 0x80}, /* reset */
{0x13, 0xcd},
{0x09, 0x01},
{0x0d, 0x00},
{0x11, 0x80},
{0x12, 0x20}, /* 1600x1200 */
{0x33, 0x0c},
{0x35, 0x90},
{0x36, 0x37},
/* ms-win traces */
{0x11, 0x83}, /* clock / 3 ? */
{0x2d, 0x00}, /* 60 Hz filter */
{0x24, 0xb0}, /* normal colors */
{0x25, 0x90},
{0x10, 0x43},
};
/*
 * Initialisation sequence for the OV3620B sensor. The register/value
 * pairs below are written in order over I2C; the order matters.
 *
 * From the datasheet: writing COMH (0x12) to change the sensor mode
 * resets the sensor's cropping-window registers to their default
 * values, and the sensor needs 4096 external clocks (~160us at 24MHz)
 * after a reset before register access is reliable again. That is why
 * the window registers are programmed twice below.
 */
static const struct ov_i2c_regvals norm_3620b[] = {
	{ 0x12, 0x80 },	/* COMH: soft reset */
	{ 0x12, 0x00 },	/* COMH: QXGA resolution, master mode */
	{ 0x11, 0x80 },	/* CLKRC: frequency doubler on, divider 1 */
	{ 0x13, 0xc0 },	/* COMI: fast AEC, big steps; AGC/AWB/AEC manual */
	{ 0x09, 0x08 },	/* COMC: longer sampling reset, weakest drive */
	{ 0x0c, 0x08 },	/* COMD: normal output, no MSB/LSB swap */
	{ 0x0d, 0xa1 },	/* COME: average 4 pixels, auto-zero circuit on */
	{ 0x0e, 0x70 },	/* COMF: 24 MHz system clock, auto offset cancel */
	{ 0x0f, 0x42 },	/* COMG: calibrate black level from optical black */
	{ 0x14, 0xc6 },	/* COMJ: AGC ceiling 8x, drop out-of-range frames */
	{ 0x15, 0x02 },	/* COMK: VSYNC polarity negative */
	{ 0x33, 0x09 },	/* CHLF: half sensor-buffer current */
	{ 0x34, 0x50 },	/* VBLM: hard reset, blooming voltage select 5 */
	{ 0x36, 0x00 },	/* VCHG: sensor precharge voltage control */
	{ 0x37, 0x04 },	/* ADC: range control 4, 1.0x input range */
	{ 0x38, 0x52 },	/* ACOM: analog gain 1x, black level cal on */
	{ 0x3a, 0x00 },	/* FREFA: internal reference adjustment */
	{ 0x3c, 0x1f },	/* FVOPT: internal reference adjustment */
	/* registers 0x40-0x4d are undocumented ("it's a secret") */
	{ 0x44, 0x00 },
	{ 0x40, 0x00 },
	{ 0x41, 0x00 },
	{ 0x42, 0x00 },
	{ 0x43, 0x00 },
	{ 0x45, 0x80 },
	{ 0x48, 0xc0 },
	{ 0x49, 0x19 },
	{ 0x4b, 0x80 },
	{ 0x4d, 0xc4 },
	{ 0x35, 0x4c },	/* VREF: column high/low reference voltages */
	{ 0x3d, 0x00 },	/* undocumented */
	{ 0x3e, 0x00 },	/* undocumented */
	{ 0x3b, 0x18 },	/* FREFB: internal reference adjustment */
	{ 0x33, 0x19 },	/* CHLF: double sensor current */
	{ 0x34, 0x5a },	/* VBLM: sensor current control 10 */
	{ 0x3b, 0x00 },	/* FREFB: back to 0 */
	{ 0x33, 0x09 },	/* CHLF: back to normal sensor current */
	{ 0x34, 0x50 },	/* VBLM: back to initial value */
	/* first window-setup pass, XGA */
	{ 0x12, 0x40 },	/* COMH: XGA, master mode */
	{ 0x17, 0x1f },	/* HREFST: horizontal window start, 8 MSBs */
	{ 0x18, 0x5f },	/* HREFEND: horizontal window end, 8 MSBs */
	{ 0x19, 0x00 },	/* VSTRT: vertical window start, 8 MSBs */
	{ 0x1a, 0x60 },	/* VEND: vertical window end, 8 MSBs */
	{ 0x32, 0x12 },	/* COMM: horizontal window start/end, 3 LSBs */
	{ 0x03, 0x4a },	/* COMA: AWB threshold, vertical window LSBs */
	{ 0x11, 0x80 },	/* CLKRC: doubler on, divider 1 (rewritten) */
	/*
	 * Toggle COMH through QXGA and back to XGA, then redo the whole
	 * window setup: each COMH write reset the cropping registers
	 * (see the datasheet note at the top of this table).
	 */
	{ 0x12, 0x00 },	/* COMH: QXGA */
	{ 0x12, 0x40 },	/* COMH: XGA */
	{ 0x17, 0x1f },	/* HREFST */
	{ 0x18, 0x5f },	/* HREFEND */
	{ 0x19, 0x00 },	/* VSTRT */
	{ 0x1a, 0x60 },	/* VEND */
	{ 0x32, 0x12 },	/* COMM */
	{ 0x03, 0x4a },	/* COMA */
	{ 0x02, 0xaf },	/* RED: red channel gain */
	{ 0x2d, 0xd2 },	/* ADDVSL: VSYNC pulse width, LSB */
	{ 0x00, 0x18 },	/* GAIN */
	{ 0x01, 0xf0 },	/* BLUE: blue channel gain */
	{ 0x10, 0x0a },	/* AEC: automatic exposure, 8 MSBs */
	/* 0xe1-0xff are undocumented */
	{ 0xe1, 0x67 },
	{ 0xe3, 0x03 },
	{ 0xe4, 0x26 },
	{ 0xe5, 0x3e },
	{ 0xf8, 0x01 },
	{ 0xff, 0x01 },
};
/* Initialisation sequence for OV6x20-series sensors (I2C writes, in order) */
static const struct ov_i2c_regvals norm_6x20[] = {
{ 0x12, 0x80 }, /* reset */
{ 0x11, 0x01 },
{ 0x03, 0x60 },
{ 0x05, 0x7f }, /* For when autoadjust is off */
{ 0x07, 0xa8 },
/* The ratio of 0x0c and 0x0d controls the white point */
{ 0x0c, 0x24 },
{ 0x0d, 0x24 },
{ 0x0f, 0x15 }, /* COMS */
{ 0x10, 0x75 }, /* AEC Exposure time */
{ 0x12, 0x24 }, /* Enable AGC */
{ 0x14, 0x04 },
/* 0x16: 0x06 helps frame stability with moving objects */
{ 0x16, 0x06 },
/* { 0x20, 0x30 }, * Aperture correction enable */
{ 0x26, 0xb2 }, /* BLC enable */
/* 0x28: 0x05 Selects RGB format if RGB on */
{ 0x28, 0x05 },
{ 0x2a, 0x04 }, /* Disable framerate adjust */
/* { 0x2b, 0xac }, * Framerate; Set 2a[7] first */
{ 0x2d, 0x85 },
{ 0x33, 0xa0 }, /* Color Processing Parameter */
{ 0x34, 0xd2 }, /* Max A/D range */
{ 0x38, 0x8b },
{ 0x39, 0x40 },
{ 0x3c, 0x39 }, /* Enable AEC mode changing */
{ 0x3c, 0x3c }, /* Change AEC mode */
{ 0x3c, 0x24 }, /* Disable AEC mode changing */
{ 0x3d, 0x80 },
/* These next two registers (0x4a, 0x4b) are undocumented.
 * They control the color balance */
{ 0x4a, 0x80 },
{ 0x4b, 0x80 },
{ 0x4d, 0xd2 }, /* This reduces noise a bit */
{ 0x4e, 0xc1 },
{ 0x4f, 0x04 },
/* Do 50-53 have any effect? */
/* Toggle 0x12[2] off and on here? */
};
/* Initialisation sequence for OV6x30-series sensors (I2C writes, in order) */
static const struct ov_i2c_regvals norm_6x30[] = {
{ 0x12, 0x80 }, /* Reset */
{ 0x00, 0x1f }, /* Gain */
{ 0x01, 0x99 }, /* Blue gain */
{ 0x02, 0x7c }, /* Red gain */
{ 0x03, 0xc0 }, /* Saturation */
{ 0x05, 0x0a }, /* Contrast */
{ 0x06, 0x95 }, /* Brightness */
{ 0x07, 0x2d }, /* Sharpness */
{ 0x0c, 0x20 },
{ 0x0d, 0x20 },
{ 0x0e, 0xa0 }, /* Was 0x20, bit7 enables a 2x gain which we need */
{ 0x0f, 0x05 },
{ 0x10, 0x9a },
{ 0x11, 0x00 }, /* Pixel clock = fastest */
{ 0x12, 0x24 }, /* Enable AGC and AWB */
{ 0x13, 0x21 },
{ 0x14, 0x80 },
{ 0x15, 0x01 },
{ 0x16, 0x03 },
{ 0x17, 0x38 },
{ 0x18, 0xea },
{ 0x19, 0x04 },
{ 0x1a, 0x93 },
{ 0x1b, 0x00 },
{ 0x1e, 0xc4 },
{ 0x1f, 0x04 },
{ 0x20, 0x20 },
{ 0x21, 0x10 },
{ 0x22, 0x88 },
{ 0x23, 0xc0 }, /* Crystal circuit power level */
{ 0x25, 0x9a }, /* Increase AEC black ratio */
{ 0x26, 0xb2 }, /* BLC enable */
{ 0x27, 0xa2 },
{ 0x28, 0x00 },
{ 0x29, 0x00 },
{ 0x2a, 0x84 }, /* 60 Hz power */
{ 0x2b, 0xa8 }, /* 60 Hz power */
{ 0x2c, 0xa0 },
{ 0x2d, 0x95 }, /* Enable auto-brightness */
{ 0x2e, 0x88 },
{ 0x33, 0x26 },
{ 0x34, 0x03 },
{ 0x36, 0x8f },
{ 0x37, 0x80 },
{ 0x38, 0x83 },
{ 0x39, 0x80 },
{ 0x3a, 0x0f },
{ 0x3b, 0x3c },
{ 0x3c, 0x1a },
{ 0x3d, 0x80 },
{ 0x3e, 0x80 },
{ 0x3f, 0x0e },
{ 0x40, 0x00 }, /* White bal */
{ 0x41, 0x00 }, /* White bal */
{ 0x42, 0x80 },
{ 0x43, 0x3f }, /* White bal */
{ 0x44, 0x80 },
{ 0x45, 0x20 },
{ 0x46, 0x20 },
{ 0x47, 0x80 },
{ 0x48, 0x7f },
{ 0x49, 0x00 },
{ 0x4a, 0x00 },
{ 0x4b, 0x80 },
{ 0x4c, 0xd0 },
{ 0x4d, 0x10 }, /* U = 0.563u, V = 0.714v */
{ 0x4e, 0x40 },
{ 0x4f, 0x07 }, /* UV avg., col. killer: max */
{ 0x50, 0xff },
{ 0x54, 0x23 }, /* Max AGC gain: 18dB */
{ 0x55, 0xff },
{ 0x56, 0x12 },
{ 0x57, 0x81 },
{ 0x58, 0x75 },
{ 0x59, 0x01 }, /* AGC dark current comp.: +1 */
{ 0x5a, 0x2c },
{ 0x5b, 0x0f }, /* AWB chrominance levels */
{ 0x5c, 0x10 },
{ 0x3d, 0x80 },
{ 0x27, 0xa6 },
{ 0x12, 0x20 }, /* Toggle AWB */
{ 0x12, 0x24 },
};
/* Lawrence Glaister <lg@jfm.bc.ca> reports:
*
* Register 0x0f in the 7610 has the following effects:
*
* 0x85 (AEC method 1): Best overall, good contrast range
* 0x45 (AEC method 2): Very overexposed
* 0xa5 (spec sheet default): Ok, but the black level is
* shifted resulting in loss of contrast
* 0x05 (old driver setting): very overexposed, too much
* contrast
*/
/*
 * Initialisation sequence for the OV7610 sensor.
 * See the comment block above for the rationale behind the 0x0f value.
 */
static const struct ov_i2c_regvals norm_7610[] = {
{ 0x10, 0xff },
{ 0x16, 0x06 },
{ 0x28, 0x24 },
{ 0x2b, 0xac },
{ 0x12, 0x00 },
{ 0x38, 0x81 },
{ 0x28, 0x24 }, /* 0c */
{ 0x0f, 0x85 }, /* lg's setting */
{ 0x15, 0x01 },
{ 0x20, 0x1c },
{ 0x23, 0x2a },
{ 0x24, 0x10 },
{ 0x25, 0x8a },
{ 0x26, 0xa2 },
{ 0x27, 0xc2 },
{ 0x2a, 0x04 },
{ 0x2c, 0xfe },
{ 0x2d, 0x93 },
{ 0x30, 0x71 },
{ 0x31, 0x60 },
{ 0x32, 0x26 },
{ 0x33, 0x20 },
{ 0x34, 0x48 },
{ 0x12, 0x24 },
{ 0x11, 0x01 },
{ 0x0c, 0x24 },
{ 0x0d, 0x24 },
};
/* Initialisation sequence for the OV7620 sensor (I2C writes, in order) */
static const struct ov_i2c_regvals norm_7620[] = {
{ 0x12, 0x80 }, /* reset */
{ 0x00, 0x00 }, /* gain */
{ 0x01, 0x80 }, /* blue gain */
{ 0x02, 0x80 }, /* red gain */
{ 0x03, 0xc0 }, /* OV7670_R03_VREF */
{ 0x06, 0x60 },
{ 0x07, 0x00 },
{ 0x0c, 0x24 },
{ 0x0c, 0x24 }, /* NOTE(review): duplicate of the previous write — harmless, but possibly one of these was meant for another register; verify against vendor trace */
{ 0x0d, 0x24 },
{ 0x11, 0x01 },
{ 0x12, 0x24 },
{ 0x13, 0x01 },
{ 0x14, 0x84 },
{ 0x15, 0x01 },
{ 0x16, 0x03 },
{ 0x17, 0x2f },
{ 0x18, 0xcf },
{ 0x19, 0x06 },
{ 0x1a, 0xf5 },
{ 0x1b, 0x00 },
{ 0x20, 0x18 },
{ 0x21, 0x80 },
{ 0x22, 0x80 },
{ 0x23, 0x00 },
{ 0x26, 0xa2 },
{ 0x27, 0xea },
{ 0x28, 0x22 }, /* Was 0x20, bit1 enables a 2x gain which we need */
{ 0x29, 0x00 },
{ 0x2a, 0x10 },
{ 0x2b, 0x00 },
{ 0x2c, 0x88 },
{ 0x2d, 0x91 },
{ 0x2e, 0x80 },
{ 0x2f, 0x44 },
{ 0x60, 0x27 },
{ 0x61, 0x02 },
{ 0x62, 0x5f },
{ 0x63, 0xd5 },
{ 0x64, 0x57 },
{ 0x65, 0x83 },
{ 0x66, 0x55 },
{ 0x67, 0x92 },
{ 0x68, 0xcf },
{ 0x69, 0x76 },
{ 0x6a, 0x22 },
{ 0x6b, 0x00 },
{ 0x6c, 0x02 },
{ 0x6d, 0x44 },
{ 0x6e, 0x80 },
{ 0x6f, 0x1d },
{ 0x70, 0x8b },
{ 0x71, 0x00 },
{ 0x72, 0x14 },
{ 0x73, 0x54 },
{ 0x74, 0x00 },
{ 0x75, 0x8e },
{ 0x76, 0x00 },
{ 0x77, 0xff },
{ 0x78, 0x80 },
{ 0x79, 0x80 },
{ 0x7a, 0x80 },
{ 0x7b, 0xe2 },
{ 0x7c, 0x00 },
};
/* 7640 and 7648. The defaults should be OK for most registers.
 * Only a reset followed by a mode select is needed. */
static const struct ov_i2c_regvals norm_7640[] = {
{ 0x12, 0x80 },
{ 0x12, 0x14 },
};
/* OV519 bridge-register setup used when the sensor is an OV7660
 * (these are bridge writes, not sensor I2C writes) */
static const struct ov_regvals init_519_ov7660[] = {
{ 0x5d, 0x03 }, /* Turn off suspend mode */
{ 0x53, 0x9b }, /* 0x9f enables the (unused) microcontroller */
{ 0x54, 0x0f }, /* bit2 (jpeg enable) */
{ 0xa2, 0x20 }, /* a2-a5 are undocumented */
{ 0xa3, 0x18 },
{ 0xa4, 0x04 },
{ 0xa5, 0x28 },
{ 0x37, 0x00 }, /* SetUsbInit */
{ 0x55, 0x02 }, /* 4.096 Mhz audio clock */
/* Enable both fields, YUV Input, disable defect comp (why?) */
{ 0x20, 0x0c }, /* 0x0d does U <-> V swap */
{ 0x21, 0x38 },
{ 0x22, 0x1d },
{ 0x17, 0x50 }, /* undocumented */
{ 0x37, 0x00 }, /* undocumented */
{ 0x40, 0xff }, /* I2C timeout counter */
{ 0x46, 0x00 }, /* I2C clock prescaler */
};
/* Initialisation sequence for the OV7660 sensor; the OV7670_R* macros
 * name registers it shares with the OV7670 */
static const struct ov_i2c_regvals norm_7660[] = {
{OV7670_R12_COM7, OV7670_COM7_RESET},
{OV7670_R11_CLKRC, 0x81},
{0x92, 0x00}, /* DM_LNL */
{0x93, 0x00}, /* DM_LNH */
{0x9d, 0x4c}, /* BD50ST */
{0x9e, 0x3f}, /* BD60ST */
{OV7670_R3B_COM11, 0x02},
{OV7670_R13_COM8, 0xf5},
{OV7670_R10_AECH, 0x00},
{OV7670_R00_GAIN, 0x00},
{OV7670_R01_BLUE, 0x7c},
{OV7670_R02_RED, 0x9d},
{OV7670_R12_COM7, 0x00},
{OV7670_R04_COM1, 00}, /* NOTE(review): "00" is an octal literal — same value as 0x00, but inconsistent with the rest of the table */
{OV7670_R18_HSTOP, 0x01},
{OV7670_R17_HSTART, 0x13},
{OV7670_R32_HREF, 0x92},
{OV7670_R19_VSTART, 0x02},
{OV7670_R1A_VSTOP, 0x7a},
{OV7670_R03_VREF, 0x00},
{OV7670_R0E_COM5, 0x04},
{OV7670_R0F_COM6, 0x62},
{OV7670_R15_COM10, 0x00},
{0x16, 0x02}, /* RSVD */
{0x1b, 0x00}, /* PSHFT */
{OV7670_R1E_MVFP, 0x01},
{0x29, 0x3c}, /* RSVD */
{0x33, 0x00}, /* CHLF */
{0x34, 0x07}, /* ARBLM */
{0x35, 0x84}, /* RSVD */
{0x36, 0x00}, /* RSVD */
{0x37, 0x04}, /* ADC */
{0x39, 0x43}, /* OFON */
{OV7670_R3A_TSLB, 0x00},
{OV7670_R3C_COM12, 0x6c},
{OV7670_R3D_COM13, 0x98},
{OV7670_R3F_EDGE, 0x23},
{OV7670_R40_COM15, 0xc1},
{OV7670_R41_COM16, 0x22},
{0x6b, 0x0a}, /* DBLV */
{0xa1, 0x08}, /* RSVD */
{0x69, 0x80}, /* HV */
{0x43, 0xf0}, /* RSVD.. */
{0x44, 0x10},
{0x45, 0x78},
{0x46, 0xa8},
{0x47, 0x60},
{0x48, 0x80},
{0x59, 0xba},
{0x5a, 0x9a},
{0x5b, 0x22},
{0x5c, 0xb9},
{0x5d, 0x9b},
{0x5e, 0x10},
{0x5f, 0xe0},
{0x60, 0x85},
{0x61, 0x60},
{0x9f, 0x9d}, /* RSVD */
{0xa0, 0xa0}, /* DSPC2 */
{0x4f, 0x60}, /* matrix */
{0x50, 0x64},
{0x51, 0x04},
{0x52, 0x18},
{0x53, 0x3c},
{0x54, 0x54},
{0x55, 0x40},
{0x56, 0x40},
{0x57, 0x40},
{0x58, 0x0d}, /* matrix sign */
{0x8b, 0xcc}, /* RSVD */
{0x8c, 0xcc},
{0x8d, 0xcf},
{0x6c, 0x40}, /* gamma curve */
{0x6d, 0xe0},
{0x6e, 0xa0},
{0x6f, 0x80},
{0x70, 0x70},
{0x71, 0x80},
{0x72, 0x60},
{0x73, 0x60},
{0x74, 0x50},
{0x75, 0x40},
{0x76, 0x38},
{0x77, 0x3c},
{0x78, 0x32},
{0x79, 0x1a},
{0x7a, 0x28},
{0x7b, 0x24},
{0x7c, 0x04}, /* gamma curve */
{0x7d, 0x12},
{0x7e, 0x26},
{0x7f, 0x46},
{0x80, 0x54},
{0x81, 0x64},
{0x82, 0x70},
{0x83, 0x7c},
{0x84, 0x86},
{0x85, 0x8e},
{0x86, 0x9c},
{0x87, 0xab},
{0x88, 0xc4},
{0x89, 0xd1},
{0x8a, 0xe5},
{OV7670_R14_COM9, 0x1e},
{OV7670_R24_AEW, 0x80},
{OV7670_R25_AEB, 0x72},
{OV7670_R26_VPT, 0xb3},
{0x62, 0x80}, /* LCC1 */
{0x63, 0x80}, /* LCC2 */
{0x64, 0x06}, /* LCC3 */
{0x65, 0x00}, /* LCC4 */
{0x66, 0x01}, /* LCC5 */
{0x94, 0x0e}, /* RSVD.. */
{0x95, 0x14},
/* finally enable fast AEC, AGC, AWB and AEC (COM8) */
{OV7670_R13_COM8, OV7670_COM8_FASTAEC
| OV7670_COM8_AECSTEP
| OV7670_COM8_BFILT
| 0x10
| OV7670_COM8_AGC
| OV7670_COM8_AWB
| OV7670_COM8_AEC},
{0xa1, 0xc8}
};
/*
 * OV9600 sensor initialization values, written verbatim over I2C by
 * write_i2c_regvals().  Raw register numbers; register meanings are not
 * documented here.
 */
static const struct ov_i2c_regvals norm_9600[] = {
	{0x12, 0x80}, /* presumably a sensor soft reset (bit 7 of reg 0x12),
		       * as on other OV sensors -- TODO confirm vs datasheet */
	{0x0c, 0x28},
	{0x11, 0x80},
	{0x13, 0xb5},
	{0x14, 0x3e},
	{0x1b, 0x04},
	{0x24, 0xb0},
	{0x25, 0x90},
	{0x26, 0x94},
	{0x35, 0x90},
	{0x37, 0x07},
	{0x38, 0x08},
	{0x01, 0x8e},
	{0x02, 0x85}
};
/* 7670. Defaults taken from OmniVision provided data,
* as provided by Jonathan Corbet of OLPC */
static const struct ov_i2c_regvals norm_7670[] = {
	{ OV7670_R12_COM7, OV7670_COM7_RESET },
	{ OV7670_R3A_TSLB, 0x04 }, /* OV */
	{ OV7670_R12_COM7, OV7670_COM7_FMT_VGA }, /* VGA */
	{ OV7670_R11_CLKRC, 0x01 },
	/*
	 * Set the hardware window.  These values from OV don't entirely
	 * make sense - hstop is less than hstart.  But they work...
	 */
	{ OV7670_R17_HSTART, 0x13 },
	{ OV7670_R18_HSTOP, 0x01 },
	{ OV7670_R32_HREF, 0xb6 },
	{ OV7670_R19_VSTART, 0x02 },
	{ OV7670_R1A_VSTOP, 0x7a },
	{ OV7670_R03_VREF, 0x0a },
	{ OV7670_R0C_COM3, 0x00 },
	{ OV7670_R3E_COM14, 0x00 },
	/* Mystery scaling numbers */
	{ 0x70, 0x3a },
	{ 0x71, 0x35 },
	{ 0x72, 0x11 },
	{ 0x73, 0xf0 },
	{ 0xa2, 0x02 },
	/* { OV7670_R15_COM10, 0x0 }, */
	/* Gamma curve values */
	{ 0x7a, 0x20 },
	{ 0x7b, 0x10 },
	{ 0x7c, 0x1e },
	{ 0x7d, 0x35 },
	{ 0x7e, 0x5a },
	{ 0x7f, 0x69 },
	{ 0x80, 0x76 },
	{ 0x81, 0x80 },
	{ 0x82, 0x88 },
	{ 0x83, 0x8f },
	{ 0x84, 0x96 },
	{ 0x85, 0xa3 },
	{ 0x86, 0xaf },
	{ 0x87, 0xc4 },
	{ 0x88, 0xd7 },
	{ 0x89, 0xe8 },
	/* AGC and AEC parameters.  Note we start by disabling those features,
	   then turn them only after tweaking the values. */
	{ OV7670_R13_COM8, OV7670_COM8_FASTAEC
			 | OV7670_COM8_AECSTEP
			 | OV7670_COM8_BFILT },
	{ OV7670_R00_GAIN, 0x00 },
	{ OV7670_R10_AECH, 0x00 },
	{ OV7670_R0D_COM4, 0x40 }, /* magic reserved bit */
	{ OV7670_R14_COM9, 0x18 }, /* 4x gain + magic rsvd bit */
	{ OV7670_RA5_BD50MAX, 0x05 },
	{ OV7670_RAB_BD60MAX, 0x07 },
	{ OV7670_R24_AEW, 0x95 },
	{ OV7670_R25_AEB, 0x33 },
	{ OV7670_R26_VPT, 0xe3 },
	{ OV7670_R9F_HAECC1, 0x78 },
	{ OV7670_RA0_HAECC2, 0x68 },
	{ 0xa1, 0x03 }, /* magic */
	{ OV7670_RA6_HAECC3, 0xd8 },
	{ OV7670_RA7_HAECC4, 0xd8 },
	{ OV7670_RA8_HAECC5, 0xf0 },
	{ OV7670_RA9_HAECC6, 0x90 },
	{ OV7670_RAA_HAECC7, 0x94 },
	/* ...now re-enable AGC/AEC with the tweaked values in place */
	{ OV7670_R13_COM8, OV7670_COM8_FASTAEC
			 | OV7670_COM8_AECSTEP
			 | OV7670_COM8_BFILT
			 | OV7670_COM8_AGC
			 | OV7670_COM8_AEC },
	/* Almost all of these are magic "reserved" values. */
	{ OV7670_R0E_COM5, 0x61 },
	{ OV7670_R0F_COM6, 0x4b },
	{ 0x16, 0x02 },
	{ OV7670_R1E_MVFP, 0x07 },
	{ 0x21, 0x02 },
	{ 0x22, 0x91 },
	{ 0x29, 0x07 },
	{ 0x33, 0x0b },
	{ 0x35, 0x0b },
	{ 0x37, 0x1d },
	{ 0x38, 0x71 },
	{ 0x39, 0x2a },
	{ OV7670_R3C_COM12, 0x78 },
	{ 0x4d, 0x40 },
	{ 0x4e, 0x20 },
	{ OV7670_R69_GFIX, 0x00 },
	{ 0x6b, 0x4a },
	{ 0x74, 0x10 },
	{ 0x8d, 0x4f },
	{ 0x8e, 0x00 },
	{ 0x8f, 0x00 },
	{ 0x90, 0x00 },
	{ 0x91, 0x00 },
	{ 0x96, 0x00 },
	{ 0x9a, 0x00 },
	{ 0xb0, 0x84 },
	{ 0xb1, 0x0c },
	{ 0xb2, 0x0e },
	{ 0xb3, 0x82 },
	{ 0xb8, 0x0a },
	/* More reserved magic, some of which tweaks white balance */
	{ 0x43, 0x0a },
	{ 0x44, 0xf0 },
	{ 0x45, 0x34 },
	{ 0x46, 0x58 },
	{ 0x47, 0x28 },
	{ 0x48, 0x3a },
	{ 0x59, 0x88 },
	{ 0x5a, 0x88 },
	{ 0x5b, 0x44 },
	{ 0x5c, 0x67 },
	{ 0x5d, 0x49 },
	{ 0x5e, 0x0e },
	{ 0x6c, 0x0a },
	{ 0x6d, 0x55 },
	{ 0x6e, 0x11 },
	{ 0x6f, 0x9f }, /* "9e for advance AWB" */
	{ 0x6a, 0x40 },
	{ OV7670_R01_BLUE, 0x40 },
	{ OV7670_R02_RED, 0x60 },
	{ OV7670_R13_COM8, OV7670_COM8_FASTAEC
			 | OV7670_COM8_AECSTEP
			 | OV7670_COM8_BFILT
			 | OV7670_COM8_AGC
			 | OV7670_COM8_AEC
			 | OV7670_COM8_AWB },
	/* Matrix coefficients */
	{ 0x4f, 0x80 },
	{ 0x50, 0x80 },
	{ 0x51, 0x00 },
	{ 0x52, 0x22 },
	{ 0x53, 0x5e },
	{ 0x54, 0x80 },
	{ 0x58, 0x9e },
	{ OV7670_R41_COM16, OV7670_COM16_AWBGAIN },
	{ OV7670_R3F_EDGE, 0x00 },
	{ 0x75, 0x05 },
	{ 0x76, 0xe1 },
	{ 0x4c, 0x00 },
	{ 0x77, 0x01 },
	{ OV7670_R3D_COM13, OV7670_COM13_GAMMA
			  | OV7670_COM13_UVSAT
			  | 2}, /* was 3 */
	{ 0x4b, 0x09 },
	{ 0xc9, 0x60 },
	{ OV7670_R41_COM16, 0x38 },
	{ 0x56, 0x40 },
	{ 0x34, 0x11 },
	{ OV7670_R3B_COM11, OV7670_COM11_EXP|OV7670_COM11_HZAUTO },
	{ 0xa4, 0x88 },
	{ 0x96, 0x00 },
	{ 0x97, 0x30 },
	{ 0x98, 0x20 },
	{ 0x99, 0x30 },
	{ 0x9a, 0x84 },
	{ 0x9b, 0x29 },
	{ 0x9c, 0x03 },
	{ 0x9d, 0x4c },
	{ 0x9e, 0x3f },
	{ 0x78, 0x04 },
	/* Extra-weird stuff.  Some sort of multiplexor register:
	 * 0x79 appears to select a sub-register, 0xc8 writes its value */
	{ 0x79, 0x01 },
	{ 0xc8, 0xf0 },
	{ 0x79, 0x0f },
	{ 0xc8, 0x00 },
	{ 0x79, 0x10 },
	{ 0xc8, 0x7e },
	{ 0x79, 0x0a },
	{ 0xc8, 0x80 },
	{ 0x79, 0x0b },
	{ 0xc8, 0x01 },
	{ 0x79, 0x0c },
	{ 0xc8, 0x0f },
	{ 0x79, 0x0d },
	{ 0xc8, 0x20 },
	{ 0x79, 0x09 },
	{ 0xc8, 0x80 },
	{ 0x79, 0x02 },
	{ 0xc8, 0xc0 },
	{ 0x79, 0x03 },
	{ 0xc8, 0x40 },
	{ 0x79, 0x05 },
	{ 0xc8, 0x30 },
	{ 0x79, 0x26 }, /* selects sub-reg 0x26 with no value write following
			 * -- purpose unclear, kept as-is */
};
/*
 * OV8610 sensor initialization values, written verbatim over I2C by
 * write_i2c_regvals().  The "windrv 090403" comments record differences
 * against an older Windows driver register capture.
 */
static const struct ov_i2c_regvals norm_8610[] = {
	{ 0x12, 0x80 },
	{ 0x00, 0x00 },
	{ 0x01, 0x80 },
	{ 0x02, 0x80 },
	{ 0x03, 0xc0 },
	{ 0x04, 0x30 },
	{ 0x05, 0x30 }, /* was 0x10, new from windrv 090403 */
	{ 0x06, 0x70 }, /* was 0x80, new from windrv 090403 */
	{ 0x0a, 0x86 },
	{ 0x0b, 0xb0 },
	{ 0x0c, 0x20 },
	{ 0x0d, 0x20 },
	{ 0x11, 0x01 },
	{ 0x12, 0x25 },
	{ 0x13, 0x01 },
	{ 0x14, 0x04 },
	{ 0x15, 0x01 }, /* Lin and Win think different about UV order */
	{ 0x16, 0x03 },
	{ 0x17, 0x38 }, /* was 0x2f, new from windrv 090403 */
	{ 0x18, 0xea }, /* was 0xcf, new from windrv 090403 */
	{ 0x19, 0x02 }, /* was 0x06, new from windrv 090403 */
	{ 0x1a, 0xf5 },
	{ 0x1b, 0x00 },
	{ 0x20, 0xd0 }, /* was 0x90, new from windrv 090403 */
	{ 0x23, 0xc0 }, /* was 0x00, new from windrv 090403 */
	{ 0x24, 0x30 }, /* was 0x1d, new from windrv 090403 */
	{ 0x25, 0x50 }, /* was 0x57, new from windrv 090403 */
	{ 0x26, 0xa2 },
	{ 0x27, 0xea },
	{ 0x28, 0x00 },
	{ 0x29, 0x00 },
	{ 0x2a, 0x80 },
	{ 0x2b, 0xc8 }, /* was 0xcc, new from windrv 090403 */
	{ 0x2c, 0xac },
	{ 0x2d, 0x45 }, /* was 0xd5, new from windrv 090403 */
	{ 0x2e, 0x80 },
	{ 0x2f, 0x14 }, /* was 0x01, new from windrv 090403 */
	{ 0x4c, 0x00 },
	{ 0x4d, 0x30 }, /* was 0x10, new from windrv 090403 */
	{ 0x60, 0x02 }, /* was 0x01, new from windrv 090403 */
	{ 0x61, 0x00 }, /* was 0x09, new from windrv 090403 */
	{ 0x62, 0x5f }, /* was 0xd7, new from windrv 090403 */
	{ 0x63, 0xff },
	{ 0x64, 0x53 }, /* new windrv 090403 says 0x57,
			 * maybe that's wrong */
	{ 0x65, 0x00 },
	{ 0x66, 0x55 },
	{ 0x67, 0xb0 },
	{ 0x68, 0xc0 }, /* was 0xaf, new from windrv 090403 */
	{ 0x69, 0x02 },
	{ 0x6a, 0x22 },
	{ 0x6b, 0x00 },
	{ 0x6c, 0x99 }, /* was 0x80, old windrv says 0x00, but
			 * deleting bit7 colors the first images red */
	{ 0x6d, 0x11 }, /* was 0x00, new from windrv 090403 */
	{ 0x6e, 0x11 }, /* was 0x00, new from windrv 090403 */
	{ 0x6f, 0x01 },
	{ 0x70, 0x8b },
	{ 0x71, 0x00 },
	{ 0x72, 0x14 },
	{ 0x73, 0x54 },
	{ 0x74, 0x00 },/* 0x60? - was 0x00, new from windrv 090403 */
	{ 0x75, 0x0e },
	{ 0x76, 0x02 }, /* was 0x02, new from windrv 090403 */
	{ 0x77, 0xff },
	{ 0x78, 0x80 },
	{ 0x79, 0x80 },
	{ 0x7a, 0x80 },
	{ 0x7b, 0x10 }, /* was 0x13, new from windrv 090403 */
	{ 0x7c, 0x00 },
	{ 0x7d, 0x08 }, /* was 0x09, new from windrv 090403 */
	{ 0x7e, 0x08 }, /* was 0xc0, new from windrv 090403 */
	{ 0x7f, 0xfb },
	{ 0x80, 0x28 },
	{ 0x81, 0x00 },
	{ 0x82, 0x23 },
	{ 0x83, 0x0b },
	{ 0x84, 0x00 },
	{ 0x85, 0x62 }, /* was 0x61, new from windrv 090403 */
	{ 0x86, 0xc9 },
	{ 0x87, 0x00 },
	{ 0x88, 0x00 },
	{ 0x89, 0x01 },
	{ 0x12, 0x20 },
	{ 0x12, 0x25 }, /* was 0x24, new from windrv 090403 */
};
/*
 * Convert a 0..255 "absolute" control value to the OV7670's
 * sign-magnitude register encoding: values above 127 keep only their
 * low 7 bits, while values at or below 127 become (128 - v) with the
 * sign bit (0x80) set.
 */
static unsigned char ov7670_abs_to_sm(unsigned char v)
{
	unsigned char magnitude;

	if (v >= 128) {
		magnitude = v & 0x7f;
		return magnitude;
	}
	magnitude = 128 - v;
	return magnitude | 0x80;
}
/*
 * Write one byte (16-bit value for the w9968cf) to a bridge register.
 * No-op if a previous USB transfer already failed
 * (sd->gspca_dev.usb_err < 0); on failure the error is recorded there.
 * The vendor request number depends on the bridge type.
 */
static void reg_w(struct sd *sd, u16 index, u16 value)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int ret, req = 0;

	if (sd->gspca_dev.usb_err < 0)
		return;

	/* Avoid things going to fast for the bridge with a xhci host */
	udelay(150);

	switch (sd->bridge) {
	case BRIDGE_OV511:
	case BRIDGE_OV511PLUS:
		req = 2;
		break;
	case BRIDGE_OVFX2:
		req = 0x0a;
		/* fall through */
	case BRIDGE_W9968CF:
		/* OVFX2 (req 0x0a) and W9968CF (req 0) pass the value in
		 * wValue with no data stage */
		gspca_dbg(gspca_dev, D_USBO, "SET %02x %04x %04x\n",
			  req, value, index);
		ret = usb_control_msg(sd->gspca_dev.dev,
			usb_sndctrlpipe(sd->gspca_dev.dev, 0),
			req,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			value, index, NULL, 0, 500);
		goto leave;
	default:
		req = 1;
	}

	/* OV511(+)/OV518(+)/OV519: value goes in a 1-byte data stage */
	gspca_dbg(gspca_dev, D_USBO, "SET %02x 0000 %04x %02x\n",
		  req, index, value);
	sd->gspca_dev.usb_buf[0] = value;
	ret = usb_control_msg(sd->gspca_dev.dev,
			usb_sndctrlpipe(sd->gspca_dev.dev, 0),
			req,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0, index,
			sd->gspca_dev.usb_buf, 1, 500);
leave:
	if (ret < 0) {
		gspca_err(gspca_dev, "reg_w %02x failed %d\n", index, ret);
		sd->gspca_dev.usb_err = ret;
		return;
	}
}
/*
 * Read one byte from a bridge register; note: not valid for the
 * w9968cf!!
 * Returns: negative is error, pos or zero is the data byte.
 * On failure the error is recorded in sd->gspca_dev.usb_err and
 * usb_buf[0] is zeroed so callers that peek at the buffer never see
 * stale/uninitialized data.
 */
static int reg_r(struct sd *sd, u16 index)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int ret;
	int req;

	if (sd->gspca_dev.usb_err < 0)
		return -1;

	/* Vendor request number depends on the bridge type */
	switch (sd->bridge) {
	case BRIDGE_OV511:
	case BRIDGE_OV511PLUS:
		req = 3;
		break;
	case BRIDGE_OVFX2:
		req = 0x0b;
		break;
	default:
		req = 1;
	}

	/* Avoid things going to fast for the bridge with a xhci host */
	udelay(150);
	ret = usb_control_msg(sd->gspca_dev.dev,
			usb_rcvctrlpipe(sd->gspca_dev.dev, 0),
			req,
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0, index, sd->gspca_dev.usb_buf, 1, 500);

	if (ret >= 0) {
		ret = sd->gspca_dev.usb_buf[0];
		gspca_dbg(gspca_dev, D_USBI, "GET %02x 0000 %04x %02x\n",
			  req, index, ret);
	} else {
		gspca_err(gspca_dev, "reg_r %02x failed %d\n", index, ret);
		sd->gspca_dev.usb_err = ret;
		/*
		 * Make sure the result is zeroed to avoid uninitialized
		 * values.
		 */
		gspca_dev->usb_buf[0] = 0;
	}

	return ret;
}
/*
 * Read 8 bytes starting at a bridge register into usb_buf.
 * Returns the first byte (>= 0) or a negative USB error, which is also
 * recorded in sd->gspca_dev.usb_err.  On failure all 8 buffer bytes are
 * zeroed so callers never consume uninitialized data.
 */
static int reg_r8(struct sd *sd,
		  u16 index)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int ret;

	if (sd->gspca_dev.usb_err < 0)
		return -1;

	/* Avoid things going to fast for the bridge with a xhci host */
	udelay(150);
	ret = usb_control_msg(sd->gspca_dev.dev,
			usb_rcvctrlpipe(sd->gspca_dev.dev, 0),
			1, /* REQ_IO */
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0, index, sd->gspca_dev.usb_buf, 8, 500);

	if (ret >= 0) {
		ret = sd->gspca_dev.usb_buf[0];
	} else {
		gspca_err(gspca_dev, "reg_r8 %02x failed %d\n", index, ret);
		sd->gspca_dev.usb_err = ret;
		/*
		 * Make sure the buffer is zeroed to avoid uninitialized
		 * values.
		 */
		memset(gspca_dev->usb_buf, 0, 8);
	}

	return ret;
}
/*
 * Read-modify-write helper for bridge registers: the bits set in "mask"
 * are replaced by the corresponding bits of "value"; all other bits keep
 * their current register contents.  A full mask (0xff) skips the read
 * entirely and writes "value" unchanged.
 */
static void reg_w_mask(struct sd *sd,
			u16 index,
			u8 value,
			u8 mask)
{
	u8 newval = value & mask;	/* never write outside the mask */

	if (mask != 0xff) {
		int cur = reg_r(sd, index);

		if (cur < 0)
			return;	/* read failed; error already recorded */
		newval |= (u8)cur & ~mask;	/* keep unmasked bits */
	}
	reg_w(sd, index, newval);
}
/*
 * Writes multiple (n) byte value to a single register.  Only valid with
 * certain registers (0x30 and 0xc4 - 0xce).  The 32-bit value is sent
 * little-endian in an n-byte data stage.  No-op when a USB error is
 * already pending; on failure the error is recorded in usb_err.
 */
static void ov518_reg_w32(struct sd *sd, u16 index, u32 value, int n)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int ret;

	if (sd->gspca_dev.usb_err < 0)
		return;

	/* serialize into the transfer buffer in little-endian order */
	*((__le32 *) sd->gspca_dev.usb_buf) = __cpu_to_le32(value);

	/* Avoid things going to fast for the bridge with a xhci host */
	udelay(150);
	ret = usb_control_msg(sd->gspca_dev.dev,
			usb_sndctrlpipe(sd->gspca_dev.dev, 0),
			1 /* REG_IO */,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0, index,
			sd->gspca_dev.usb_buf, n, 500);
	if (ret < 0) {
		gspca_err(gspca_dev, "reg_w32 %02x failed %d\n", index, ret);
		sd->gspca_dev.usb_err = ret;
	}
}
/*
 * Write one byte to a sensor register through the OV511/OV511+ I2C
 * unit, retrying up to 6 times on NACK.  USB errors from the underlying
 * register I/O remain recorded in sd->gspca_dev.usb_err.
 */
static void ov511_i2c_w(struct sd *sd, u8 reg, u8 value)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int rc, retries;

	gspca_dbg(gspca_dev, D_USBO, "ov511_i2c_w %02x %02x\n", reg, value);

	/* Three byte write cycle */
	for (retries = 6; ; ) {
		/* Select camera register */
		reg_w(sd, R51x_I2C_SADDR_3, reg);

		/* Write "value" to I2C data port of OV511 */
		reg_w(sd, R51x_I2C_DATA, value);

		/* Initiate 3-byte write cycle */
		reg_w(sd, R511_I2C_CTL, 0x01);

		do {
			rc = reg_r(sd, R511_I2C_CTL);
		} while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */

		if (rc < 0)
			return;

		if ((rc & 2) == 0) /* Ack? */
			break;

		/* I2C abort before retrying, matching the NACK paths of
		 * ov511_i2c_r(); without it the controller would be left
		 * mid-cycle when the next attempt starts */
		reg_w(sd, R511_I2C_CTL, 0x10);

		if (--retries < 0) {
			gspca_dbg(gspca_dev, D_USBO, "i2c write retries exhausted\n");
			return;
		}
	}
}
/*
 * Read one sensor register through the OV511/OV511+ I2C unit.
 * Returns the byte read (>= 0) or negative on error.  Each phase
 * (address write, then read) is retried up to 6 times on NACK with an
 * I2C abort in between.
 */
static int ov511_i2c_r(struct sd *sd, u8 reg)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int rc, value, retries;

	/* Two byte write cycle */
	for (retries = 6; ; ) {
		/* Select camera register */
		reg_w(sd, R51x_I2C_SADDR_2, reg);

		/* Initiate 2-byte write cycle */
		reg_w(sd, R511_I2C_CTL, 0x03);

		do {
			rc = reg_r(sd, R511_I2C_CTL);
		} while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */

		if (rc < 0)
			return rc;

		if ((rc & 2) == 0) /* Ack? */
			break;

		/* I2C abort */
		reg_w(sd, R511_I2C_CTL, 0x10);

		if (--retries < 0) {
			gspca_dbg(gspca_dev, D_USBI, "i2c write retries exhausted\n");
			return -1;
		}
	}

	/* Two byte read cycle */
	for (retries = 6; ; ) {
		/* Initiate 2-byte read cycle */
		reg_w(sd, R511_I2C_CTL, 0x05);

		do {
			rc = reg_r(sd, R511_I2C_CTL);
		} while (rc > 0 && ((rc & 1) == 0)); /* Retry until idle */

		if (rc < 0)
			return rc;

		if ((rc & 2) == 0) /* Ack? */
			break;

		/* I2C abort */
		reg_w(sd, R511_I2C_CTL, 0x10);

		if (--retries < 0) {
			gspca_dbg(gspca_dev, D_USBI, "i2c read retries exhausted\n");
			return -1;
		}
	}

	value = reg_r(sd, R51x_I2C_DATA);
	gspca_dbg(gspca_dev, D_USBI, "ov511_i2c_r %02x %02x\n", reg, value);

	/* This is needed to make i2c_w() work */
	reg_w(sd, R511_I2C_CTL, 0x05);

	return value;
}
/*
 * The OV518 I2C I/O procedure is different, hence, this function.
 * This is normally only called from i2c_w().  Note that this function
 * always succeeds regardless of whether the sensor is present and
 * working: there is no ACK check, only a fixed delay plus a status read
 * to complete the cycle.
 */
static void ov518_i2c_w(struct sd *sd,
			u8 reg,
			u8 value)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;

	gspca_dbg(gspca_dev, D_USBO, "ov518_i2c_w %02x %02x\n", reg, value);

	/* Select camera register */
	reg_w(sd, R51x_I2C_SADDR_3, reg);

	/* Write "value" to I2C data port of OV511 */
	reg_w(sd, R51x_I2C_DATA, value);

	/* Initiate 3-byte write cycle */
	reg_w(sd, R518_I2C_CTL, 0x01);

	/* wait for write complete */
	msleep(4);
	reg_r8(sd, R518_I2C_CTL);
}
/*
 * returns: negative is error, pos or zero is data
 *
 * The OV518 I2C I/O procedure is different, hence, this function.
 * This is normally only called from i2c_r().  Note that this function
 * always succeeds regardless of whether the sensor is present and
 * working (no ACK check; each cycle is just completed with a status
 * read).
 */
static int ov518_i2c_r(struct sd *sd, u8 reg)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int value;

	/* Select camera register */
	reg_w(sd, R51x_I2C_SADDR_2, reg);

	/* Initiate 2-byte write cycle */
	reg_w(sd, R518_I2C_CTL, 0x03);
	reg_r8(sd, R518_I2C_CTL);

	/* Initiate 2-byte read cycle */
	reg_w(sd, R518_I2C_CTL, 0x05);
	reg_r8(sd, R518_I2C_CTL);

	value = reg_r(sd, R51x_I2C_DATA);
	gspca_dbg(gspca_dev, D_USBI, "ov518_i2c_r %02x %02x\n", reg, value);
	return value;
}
/*
 * Write a sensor register on the OVFX2 bridge: a single vendor control
 * request (0x02) carries the value in wValue and the register in
 * wIndex.  Skipped entirely once a previous USB error is pending; a new
 * failure is recorded in usb_err.
 */
static void ovfx2_i2c_w(struct sd *sd, u8 reg, u8 value)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int rc;

	if (sd->gspca_dev.usb_err < 0)
		return;

	rc = usb_control_msg(sd->gspca_dev.dev,
			usb_sndctrlpipe(sd->gspca_dev.dev, 0),
			0x02,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			(u16) value, (u16) reg, NULL, 0, 500);

	if (rc < 0) {
		gspca_err(gspca_dev, "ovfx2_i2c_w %02x failed %d\n", reg, rc);
		sd->gspca_dev.usb_err = rc;
	}

	/* logged unconditionally, matching the historical behaviour */
	gspca_dbg(gspca_dev, D_USBO, "ovfx2_i2c_w %02x %02x\n", reg, value);
}
/*
 * Read a sensor register on the OVFX2 bridge via vendor request 0x03.
 * Returns the byte read (>= 0) or a negative USB error; errors are also
 * recorded in usb_err.  Returns -1 immediately if an error is already
 * pending.
 */
static int ovfx2_i2c_r(struct sd *sd, u8 reg)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int rc;

	if (sd->gspca_dev.usb_err < 0)
		return -1;

	rc = usb_control_msg(sd->gspca_dev.dev,
			usb_rcvctrlpipe(sd->gspca_dev.dev, 0),
			0x03,
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0, (u16) reg, sd->gspca_dev.usb_buf, 1, 500);
	if (rc < 0) {
		gspca_err(gspca_dev, "ovfx2_i2c_r %02x failed %d\n", reg, rc);
		sd->gspca_dev.usb_err = rc;
		return rc;
	}

	rc = sd->gspca_dev.usb_buf[0];
	gspca_dbg(gspca_dev, D_USBI, "ovfx2_i2c_r %02x %02x\n",
		  reg, rc);
	return rc;
}
/*
 * Bridge-independent sensor register write with a write-through cache:
 * the bus transaction is skipped when the cache already holds this
 * value.  A successful sensor reset (reg 0x12, bit 7) invalidates the
 * whole cache; any other successful write updates its cache slot.
 */
static void i2c_w(struct sd *sd, u8 reg, u8 value)
{
	/* cache hit: nothing to do */
	if (sd->sensor_reg_cache[reg] == value)
		return;

	if (sd->bridge == BRIDGE_OV511 || sd->bridge == BRIDGE_OV511PLUS)
		ov511_i2c_w(sd, reg, value);
	else if (sd->bridge == BRIDGE_OV518 ||
		 sd->bridge == BRIDGE_OV518PLUS ||
		 sd->bridge == BRIDGE_OV519)
		ov518_i2c_w(sd, reg, value);
	else if (sd->bridge == BRIDGE_OVFX2)
		ovfx2_i2c_w(sd, reg, value);
	else if (sd->bridge == BRIDGE_W9968CF)
		w9968cf_i2c_w(sd, reg, value);

	/* don't cache anything when the transfer failed */
	if (sd->gspca_dev.usb_err < 0)
		return;

	/* Up on sensor reset empty the register cache */
	if (reg == 0x12 && (value & 0x80))
		memset(sd->sensor_reg_cache, -1,
			sizeof(sd->sensor_reg_cache));
	else
		sd->sensor_reg_cache[reg] = value;
}
/*
 * Bridge-independent sensor register read.  Served from the register
 * cache when possible (-1 marks an empty slot); a successful bus read
 * refills the cache.  Returns the byte read (>= 0) or negative on
 * error / unknown bridge.
 */
static int i2c_r(struct sd *sd, u8 reg)
{
	int val = -1;

	if (sd->sensor_reg_cache[reg] != -1)
		return sd->sensor_reg_cache[reg];

	if (sd->bridge == BRIDGE_OV511 || sd->bridge == BRIDGE_OV511PLUS)
		val = ov511_i2c_r(sd, reg);
	else if (sd->bridge == BRIDGE_OV518 ||
		 sd->bridge == BRIDGE_OV518PLUS ||
		 sd->bridge == BRIDGE_OV519)
		val = ov518_i2c_r(sd, reg);
	else if (sd->bridge == BRIDGE_OVFX2)
		val = ovfx2_i2c_r(sd, reg);
	else if (sd->bridge == BRIDGE_W9968CF)
		val = w9968cf_i2c_r(sd, reg);

	if (val >= 0)
		sd->sensor_reg_cache[reg] = val;

	return val;
}
/*
 * Read-modify-write for sensor registers: bits set in "mask" take the
 * corresponding bits of "value", all other bits keep their current
 * contents.  Bails out silently if the initial read fails.
 */
static void i2c_w_mask(struct sd *sd,
			u8 reg,
			u8 value,
			u8 mask)
{
	int cur = i2c_r(sd, reg);

	if (cur < 0)
		return;

	i2c_w(sd, reg, (value & mask) | ((u8)cur & ~mask));
}
/* Temporarily stops OV511 from functioning.  Must do this before
 * changing registers while the camera is streaming.  The exact stop
 * sequence depends on the bridge type; sd->stopped records the state
 * for ov51x_restart(). */
static inline void ov51x_stop(struct sd *sd)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;

	gspca_dbg(gspca_dev, D_STREAM, "stopping\n");
	sd->stopped = 1;
	switch (sd->bridge) {
	case BRIDGE_OV511:
	case BRIDGE_OV511PLUS:
		reg_w(sd, R51x_SYS_RESET, 0x3d);
		break;
	case BRIDGE_OV518:
	case BRIDGE_OV518PLUS:
		reg_w_mask(sd, R51x_SYS_RESET, 0x3a, 0x3a);
		break;
	case BRIDGE_OV519:
		reg_w(sd, OV519_R51_RESET1, 0x0f);
		reg_w(sd, OV519_R51_RESET1, 0x00);
		reg_w(sd, 0x22, 0x00); /* FRAR */
		break;
	case BRIDGE_OVFX2:
		reg_w_mask(sd, 0x0f, 0x00, 0x02);
		break;
	case BRIDGE_W9968CF:
		reg_w(sd, 0x3c, 0x0a05); /* stop USB transfer */
		break;
	}
}
/* Restarts OV511 after ov511_stop() is called.  Has no effect if it is
 * not actually stopped (for performance); sd->stopped tracks that. */
static inline void ov51x_restart(struct sd *sd)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;

	gspca_dbg(gspca_dev, D_STREAM, "restarting\n");
	if (!sd->stopped)
		return;
	sd->stopped = 0;

	/* Reinitialize the stream */
	switch (sd->bridge) {
	case BRIDGE_OV511:
	case BRIDGE_OV511PLUS:
		reg_w(sd, R51x_SYS_RESET, 0x00);
		break;
	case BRIDGE_OV518:
	case BRIDGE_OV518PLUS:
		reg_w(sd, 0x2f, 0x80);
		reg_w(sd, R51x_SYS_RESET, 0x00);
		break;
	case BRIDGE_OV519:
		reg_w(sd, OV519_R51_RESET1, 0x0f);
		reg_w(sd, OV519_R51_RESET1, 0x00);
		reg_w(sd, 0x22, 0x1d); /* FRAR */
		break;
	case BRIDGE_OVFX2:
		reg_w_mask(sd, 0x0f, 0x02, 0x02);
		break;
	case BRIDGE_W9968CF:
		reg_w(sd, 0x3c, 0x8a05); /* USB FIFO enable */
		break;
	}
}
static void ov51x_set_slave_ids(struct sd *sd, u8 slave);
/* This does an initial reset of an OmniVision sensor and ensures that
 * I2C is synchronized.  Verifies the sensor ID registers
 * (OV7610_REG_ID_HIGH/LOW == 0x7f/0xa2) with up to i2c_detect_tries
 * attempts, resetting and re-syncing between attempts.
 * Returns <0 on failure. */
static int init_ov_sensor(struct sd *sd, u8 slave)
{
	int i;
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;

	ov51x_set_slave_ids(sd, slave);

	/* Reset the sensor */
	i2c_w(sd, 0x12, 0x80);

	/* Wait for it to initialize */
	msleep(150);

	for (i = 0; i < i2c_detect_tries; i++) {
		if (i2c_r(sd, OV7610_REG_ID_HIGH) == 0x7f &&
		    i2c_r(sd, OV7610_REG_ID_LOW) == 0xa2) {
			gspca_dbg(gspca_dev, D_PROBE, "I2C synced in %d attempt(s)\n",
				  i);
			return 0;
		}

		/* Reset the sensor */
		i2c_w(sd, 0x12, 0x80);

		/* Wait for it to initialize */
		msleep(150);

		/* Dummy read to sync I2C */
		if (i2c_r(sd, 0x00) < 0)
			return -1;
	}
	return -1;
}
/*
 * Set the I2C read and write slave IDs.  The "slave" argument is the
 * write slave; the read slave is (slave + 1).  The OVFX2 takes a single
 * address register, the w9968cf just remembers the address, and the
 * OV51x bridges get separate write/read SID registers.  This should not
 * be called from outside the i2c I/O functions.
 */
static void ov51x_set_slave_ids(struct sd *sd,
				u8 slave)
{
	if (sd->bridge == BRIDGE_OVFX2) {
		reg_w(sd, OVFX2_I2C_ADDR, slave);
		return;
	}
	if (sd->bridge == BRIDGE_W9968CF) {
		sd->sensor_addr = slave;
		return;
	}

	reg_w(sd, R51x_I2C_W_SID, slave);
	reg_w(sd, R51x_I2C_R_SID, slave + 1);
}
static void write_regvals(struct sd *sd,
const struct ov_regvals *regvals,
int n)
{
while (--n >= 0) {
reg_w(sd, regvals->reg, regvals->val);
regvals++;
}
}
static void write_i2c_regvals(struct sd *sd,
const struct ov_i2c_regvals *regvals,
int n)
{
while (--n >= 0) {
i2c_w(sd, regvals->reg, regvals->val);
regvals++;
}
}
/****************************************************************************
*
* OV511 and sensor configuration
*
***************************************************************************/
/* This initializes the OV2x10 / OV3610 / OV3620 / OV9600.
 * Identifies the exact sensor from the product ID registers (0x0a/0x0b)
 * and sets sd->sensor accordingly; only valid on the OVFX2 bridge. */
static void ov_hires_configure(struct sd *sd)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int high, low;

	if (sd->bridge != BRIDGE_OVFX2) {
		gspca_err(gspca_dev, "error hires sensors only supported with ovfx2\n");
		return;
	}

	gspca_dbg(gspca_dev, D_PROBE, "starting ov hires configuration\n");

	/* Detect sensor (sub)type */
	high = i2c_r(sd, 0x0a);
	low = i2c_r(sd, 0x0b);
	/* info("%x, %x", high, low); */
	switch (high) {
	case 0x96:
		switch (low) {
		case 0x40:
			gspca_dbg(gspca_dev, D_PROBE, "Sensor is a OV2610\n");
			sd->sensor = SEN_OV2610;
			return;
		case 0x41:
			gspca_dbg(gspca_dev, D_PROBE, "Sensor is a OV2610AE\n");
			sd->sensor = SEN_OV2610AE;
			return;
		case 0xb1:
			gspca_dbg(gspca_dev, D_PROBE, "Sensor is a OV9600\n");
			sd->sensor = SEN_OV9600;
			return;
		}
		break;
	case 0x36:
		if ((low & 0x0f) == 0x00) {
			gspca_dbg(gspca_dev, D_PROBE, "Sensor is a OV3610\n");
			sd->sensor = SEN_OV3610;
			return;
		}
		break;
	}
	gspca_err(gspca_dev, "Error unknown sensor type: %02x%02x\n",
		  high, low);
}
/*
 * This initializes the OV8110/OV8610 sensor.  The OV8110 uses the same
 * register settings as the OV8610, since they are very similar.  Only
 * the two version bits of COM_I are meaningful here.
 */
static void ov8xx0_configure(struct sd *sd)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int id;

	gspca_dbg(gspca_dev, D_PROBE, "starting ov8xx0 configuration\n");

	/* Detect sensor (sub)type */
	id = i2c_r(sd, OV7610_REG_COM_I);
	if (id < 0) {
		gspca_err(gspca_dev, "Error detecting sensor type\n");
		return;
	}

	switch (id & 3) {
	case 1:
		sd->sensor = SEN_OV8610;
		break;
	default:
		gspca_err(gspca_dev, "Unknown image sensor version: %d\n",
			  id & 3);
		break;
	}
}
/* This initializes the OV7610, OV7620, or OV76BE sensor.  The OV76BE
 * uses the same register settings as the OV7610, since they are very
 * similar.  Identification cascades from the COM_I version bits to the
 * product ID registers (0x0a/0x0b) where needed, and sets sd->sensor. */
static void ov7xx0_configure(struct sd *sd)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int rc, high, low;

	gspca_dbg(gspca_dev, D_PROBE, "starting OV7xx0 configuration\n");

	/* Detect sensor (sub)type */
	rc = i2c_r(sd, OV7610_REG_COM_I);

	/* add OV7670 here
	 * it appears to be wrongly detected as a 7610 by default */
	if (rc < 0) {
		gspca_err(gspca_dev, "Error detecting sensor type\n");
		return;
	}
	if ((rc & 3) == 3) {
		/* quick hack to make OV7670s work */
		high = i2c_r(sd, 0x0a);
		low = i2c_r(sd, 0x0b);
		/* info("%x, %x", high, low); */
		if (high == 0x76 && (low & 0xf0) == 0x70) {
			gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV76%02x\n",
				  low);
			sd->sensor = SEN_OV7670;
		} else {
			gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV7610\n");
			sd->sensor = SEN_OV7610;
		}
	} else if ((rc & 3) == 1) {
		/* I don't know what's different about the 76BE yet. */
		if (i2c_r(sd, 0x15) & 1) {
			gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV7620AE\n");
			sd->sensor = SEN_OV7620AE;
		} else {
			gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV76BE\n");
			sd->sensor = SEN_OV76BE;
		}
	} else if ((rc & 3) == 0) {
		/* try to read product id registers */
		high = i2c_r(sd, 0x0a);
		if (high < 0) {
			gspca_err(gspca_dev, "Error detecting camera chip PID\n");
			return;
		}
		low = i2c_r(sd, 0x0b);
		if (low < 0) {
			gspca_err(gspca_dev, "Error detecting camera chip VER\n");
			return;
		}
		if (high == 0x76) {
			switch (low) {
			case 0x30:
				gspca_err(gspca_dev, "Sensor is an OV7630/OV7635\n");
				gspca_err(gspca_dev, "7630 is not supported by this driver\n");
				return;
			case 0x40:
				gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV7645\n");
				sd->sensor = SEN_OV7640; /* FIXME */
				break;
			case 0x45:
				gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV7645B\n");
				sd->sensor = SEN_OV7640; /* FIXME */
				break;
			case 0x48:
				gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV7648\n");
				sd->sensor = SEN_OV7648;
				break;
			case 0x60:
				gspca_dbg(gspca_dev, D_PROBE, "Sensor is a OV7660\n");
				sd->sensor = SEN_OV7660;
				break;
			default:
				gspca_err(gspca_dev, "Unknown sensor: 0x76%02x\n",
					  low);
				return;
			}
		} else {
			gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV7620\n");
			sd->sensor = SEN_OV7620;
		}
	} else {
		gspca_err(gspca_dev, "Unknown image sensor version: %d\n",
			  rc & 3);
	}
}
/* This initializes the OV6620, OV6630, OV6630AE, or OV6630AF sensor.
 * Sets sd->sensor from the full COM_I register value and marks the
 * device as a SIF (Small Image Format) sensor. */
static void ov6xx0_configure(struct sd *sd)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int rc;

	gspca_dbg(gspca_dev, D_PROBE, "starting OV6xx0 configuration\n");

	/* Detect sensor (sub)type */
	rc = i2c_r(sd, OV7610_REG_COM_I);
	if (rc < 0) {
		gspca_err(gspca_dev, "Error detecting sensor type\n");
		return;
	}

	/* Ugh. The first two bits are the version bits, but
	 * the entire register value must be used. I guess OVT
	 * underestimated how many variants they would make. */
	switch (rc) {
	case 0x00:
		sd->sensor = SEN_OV6630;
		pr_warn("WARNING: Sensor is an OV66308. Your camera may have been misdetected in previous driver versions.\n");
		break;
	case 0x01:
		sd->sensor = SEN_OV6620;
		gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV6620\n");
		break;
	case 0x02:
		sd->sensor = SEN_OV6630;
		gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV66308AE\n");
		break;
	case 0x03:
		sd->sensor = SEN_OV66308AF;
		gspca_dbg(gspca_dev, D_PROBE, "Sensor is an OV66308AF\n");
		break;
	case 0x90:
		sd->sensor = SEN_OV6630;
		pr_warn("WARNING: Sensor is an OV66307. Your camera may have been misdetected in previous driver versions.\n");
		break;
	default:
		gspca_err(gspca_dev, "FATAL: Unknown sensor version: 0x%02x\n",
			  rc);
		return;
	}

	/* Set sensor-specific vars */
	sd->sif = 1;
}
/*
 * Turns on or off the LED.  Only has an effect with
 * OV511+/OV518(+)/OV519; the plain OV511 has no LED control.
 * sd->invert_led flips the polarity for cameras wired that way.
 */
static void ov51x_led_control(struct sd *sd, int on)
{
	if (sd->invert_led)
		on = !on;

	if (sd->bridge == BRIDGE_OV511PLUS) {
		reg_w(sd, R511_SYS_LED_CTL, on);
	} else if (sd->bridge == BRIDGE_OV518 ||
		   sd->bridge == BRIDGE_OV518PLUS) {
		reg_w_mask(sd, R518_GPIO_OUT, 0x02 * on, 0x02);
	} else if (sd->bridge == BRIDGE_OV519) {
		reg_w_mask(sd, OV519_GPIO_DATA_OUT0, on, 1);
	}
}
/* Clear the bridge's snapshot-button state if sd->snapshot_needs_reset
 * was flagged (by the pkt_scan interrupt handler). */
static void sd_reset_snapshot(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (!sd->snapshot_needs_reset)
		return;

	/* Note it is important that we clear sd->snapshot_needs_reset,
	   before actually clearing the snapshot state in the bridge
	   otherwise we might race with the pkt_scan interrupt handler */
	sd->snapshot_needs_reset = 0;

	switch (sd->bridge) {
	case BRIDGE_OV511:
	case BRIDGE_OV511PLUS:
		reg_w(sd, R51x_SYS_SNAP, 0x02);
		reg_w(sd, R51x_SYS_SNAP, 0x00);
		break;
	case BRIDGE_OV518:
	case BRIDGE_OV518PLUS:
		reg_w(sd, R51x_SYS_SNAP, 0x02); /* Reset */
		reg_w(sd, R51x_SYS_SNAP, 0x01); /* Enable */
		break;
	case BRIDGE_OV519:
		reg_w(sd, R51x_SYS_RESET, 0x40);
		reg_w(sd, R51x_SYS_RESET, 0x00);
		break;
	}
}
/* Upload the JPEG-style Y and UV quantization tables to the bridge's
 * compression LUT.  Two 4-bit table entries are packed per register
 * byte; the UV table follows the Y table at reg + size. */
static void ov51x_upload_quan_tables(struct sd *sd)
{
	static const unsigned char yQuanTable511[] = {
		0, 1, 1, 2, 2, 3, 3, 4,
		1, 1, 1, 2, 2, 3, 4, 4,
		1, 1, 2, 2, 3, 4, 4, 4,
		2, 2, 2, 3, 4, 4, 4, 4,
		2, 2, 3, 4, 4, 5, 5, 5,
		3, 3, 4, 4, 5, 5, 5, 5,
		3, 4, 4, 4, 5, 5, 5, 5,
		4, 4, 4, 4, 5, 5, 5, 5
	};

	static const unsigned char uvQuanTable511[] = {
		0, 2, 2, 3, 4, 4, 4, 4,
		2, 2, 2, 4, 4, 4, 4, 4,
		2, 2, 3, 4, 4, 4, 4, 4,
		3, 4, 4, 4, 4, 4, 4, 4,
		4, 4, 4, 4, 4, 4, 4, 4,
		4, 4, 4, 4, 4, 4, 4, 4,
		4, 4, 4, 4, 4, 4, 4, 4,
		4, 4, 4, 4, 4, 4, 4, 4
	};

	/* OV518 quantization tables are 8x4 (instead of 8x8) */
	static const unsigned char yQuanTable518[] = {
		5, 4, 5, 6, 6, 7, 7, 7,
		5, 5, 5, 5, 6, 7, 7, 7,
		6, 6, 6, 6, 7, 7, 7, 8,
		7, 7, 6, 7, 7, 7, 8, 8
	};
	static const unsigned char uvQuanTable518[] = {
		6, 6, 6, 7, 7, 7, 7, 7,
		6, 6, 6, 7, 7, 7, 7, 7,
		6, 6, 6, 7, 7, 7, 7, 8,
		7, 7, 7, 7, 7, 7, 8, 8
	};

	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	const unsigned char *pYTable, *pUVTable;
	unsigned char val0, val1;
	int i, size, reg = R51x_COMP_LUT_BEGIN;

	gspca_dbg(gspca_dev, D_PROBE, "Uploading quantization tables\n");

	if (sd->bridge == BRIDGE_OV511 || sd->bridge == BRIDGE_OV511PLUS) {
		pYTable = yQuanTable511;
		pUVTable = uvQuanTable511;
		size = 32;	/* 64 entries -> 32 packed registers */
	} else {
		pYTable = yQuanTable518;
		pUVTable = uvQuanTable518;
		size = 16;	/* 32 entries -> 16 packed registers */
	}

	for (i = 0; i < size; i++) {
		/* pack two consecutive 4-bit Y entries into one byte */
		val0 = *pYTable++;
		val1 = *pYTable++;
		val0 &= 0x0f;
		val1 &= 0x0f;
		val0 |= val1 << 4;
		reg_w(sd, reg, val0);

		/* same packing for UV, written at reg + size */
		val0 = *pUVTable++;
		val1 = *pUVTable++;
		val0 &= 0x0f;
		val1 &= 0x0f;
		val0 |= val1 << 4;
		reg_w(sd, reg + size, val0);

		reg++;
	}
}
/* This initializes the OV511/OV511+ bridge and its compression engine
 * (the sensor itself is configured elsewhere). */
static void ov511_configure(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	/* For 511 and 511+ */
	static const struct ov_regvals init_511[] = {
		{ R51x_SYS_RESET,	0x7f },
		{ R51x_SYS_INIT,	0x01 },
		{ R51x_SYS_RESET,	0x7f },
		{ R51x_SYS_INIT,	0x01 },
		{ R51x_SYS_RESET,	0x3f },
		{ R51x_SYS_INIT,	0x01 },
		{ R51x_SYS_RESET,	0x3d },
	};

	static const struct ov_regvals norm_511[] = {
		{ R511_DRAM_FLOW_CTL,	0x01 },
		{ R51x_SYS_SNAP,	0x00 },
		{ R51x_SYS_SNAP,	0x02 },
		{ R51x_SYS_SNAP,	0x00 },
		{ R511_FIFO_OPTS,	0x1f },
		{ R511_COMP_EN,		0x00 },
		{ R511_COMP_LUT_EN,	0x03 },
	};

	static const struct ov_regvals norm_511_p[] = {
		{ R511_DRAM_FLOW_CTL,	0xff },
		{ R51x_SYS_SNAP,	0x00 },
		{ R51x_SYS_SNAP,	0x02 },
		{ R51x_SYS_SNAP,	0x00 },
		{ R511_FIFO_OPTS,	0xff },
		{ R511_COMP_EN,		0x00 },
		{ R511_COMP_LUT_EN,	0x03 },
	};

	static const struct ov_regvals compress_511[] = {
		{ 0x70, 0x1f },
		{ 0x71, 0x05 },
		{ 0x72, 0x06 },
		{ 0x73, 0x06 },
		{ 0x74, 0x14 },
		{ 0x75, 0x03 },
		{ 0x76, 0x04 },
		{ 0x77, 0x04 },
	};

	gspca_dbg(gspca_dev, D_PROBE, "Device custom id %x\n",
		  reg_r(sd, R51x_SYS_CUST_ID));

	write_regvals(sd, init_511, ARRAY_SIZE(init_511));

	switch (sd->bridge) {
	case BRIDGE_OV511:
		write_regvals(sd, norm_511, ARRAY_SIZE(norm_511));
		break;
	case BRIDGE_OV511PLUS:
		write_regvals(sd, norm_511_p, ARRAY_SIZE(norm_511_p));
		break;
	}

	/* Init compression */
	write_regvals(sd, compress_511, ARRAY_SIZE(compress_511));

	ov51x_upload_quan_tables(sd);
}
/* This initializes the OV518/OV518+ bridge, reads its revision, and
 * uploads the quantization tables (the sensor is configured
 * elsewhere). */
static void ov518_configure(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	/* For 518 and 518+ */
	static const struct ov_regvals init_518[] = {
		{ R51x_SYS_RESET,	0x40 },
		{ R51x_SYS_INIT,	0xe1 },
		{ R51x_SYS_RESET,	0x3e },
		{ R51x_SYS_INIT,	0xe1 },
		{ R51x_SYS_RESET,	0x00 },
		{ R51x_SYS_INIT,	0xe1 },
		{ 0x46,			0x00 },
		{ 0x5d,			0x03 },
	};

	static const struct ov_regvals norm_518[] = {
		{ R51x_SYS_SNAP,	0x02 }, /* Reset */
		{ R51x_SYS_SNAP,	0x01 }, /* Enable */
		{ 0x31,			0x0f },
		{ 0x5d,			0x03 },
		{ 0x24,			0x9f },
		{ 0x25,			0x90 },
		{ 0x20,			0x00 },
		{ 0x51,			0x04 },
		{ 0x71,			0x19 },
		{ 0x2f,			0x80 },
	};

	static const struct ov_regvals norm_518_p[] = {
		{ R51x_SYS_SNAP,	0x02 }, /* Reset */
		{ R51x_SYS_SNAP,	0x01 }, /* Enable */
		{ 0x31,			0x0f },
		{ 0x5d,			0x03 },
		{ 0x24,			0x9f },
		{ 0x25,			0x90 },
		{ 0x20,			0x60 },
		{ 0x51,			0x02 },
		{ 0x71,			0x19 },
		{ 0x40,			0xff },
		{ 0x41,			0x42 },
		{ 0x46,			0x00 },
		{ 0x33,			0x04 },
		{ 0x21,			0x19 },
		{ 0x3f,			0x10 },
		{ 0x2f,			0x80 },
	};

	/* First 5 bits of custom ID reg are a revision ID on OV518 */
	sd->revision = reg_r(sd, R51x_SYS_CUST_ID) & 0x1f;
	gspca_dbg(gspca_dev, D_PROBE, "Device revision %d\n", sd->revision);

	write_regvals(sd, init_518, ARRAY_SIZE(init_518));

	/* Set LED GPIO pin to output mode */
	reg_w_mask(sd, R518_GPIO_CTL, 0x00, 0x02);

	switch (sd->bridge) {
	case BRIDGE_OV518:
		write_regvals(sd, norm_518, ARRAY_SIZE(norm_518));
		break;
	case BRIDGE_OV518PLUS:
		write_regvals(sd, norm_518_p, ARRAY_SIZE(norm_518_p));
		break;
	}

	ov51x_upload_quan_tables(sd);

	reg_w(sd, 0x2f, 0x80);
}
/* Initialize the OV519 bridge: enable the system/JPEG clocks and put
 * the GPIO and reset registers into a known state. */
static void ov519_configure(struct sd *sd)
{
	static const struct ov_regvals init_519[] = {
		{ 0x5a, 0x6d }, /* EnableSystem */
		{ 0x53, 0x9b }, /* don't enable the microcontroller */
		{ OV519_R54_EN_CLK1, 0xff }, /* set bit2 to enable jpeg */
		{ 0x5d, 0x03 },
		{ 0x49, 0x01 },
		{ 0x48, 0x00 },
		/* Set LED pin to output mode. Bit 4 must be cleared or sensor
		 * detection will fail. This deserves further investigation. */
		{ OV519_GPIO_IO_CTRL0,   0xee },
		{ OV519_R51_RESET1, 0x0f },
		{ OV519_R51_RESET1, 0x00 },
		{ 0x22, 0x00 },
		/* windows reads 0x55 at this point*/
	};

	write_regvals(sd, init_519, ARRAY_SIZE(init_519));
}
/* Bridge-only init for the OVFX2 (Cypress FX2 based) bridge.
 * Marks the stream as stopped and writes the initial register set;
 * register meanings are undocumented (values taken from traces). */
static void ovfx2_configure(struct sd *sd)
{
	static const struct ov_regvals init_fx2[] = {
		{ 0x00, 0x60 },
		{ 0x02, 0x01 },
		{ 0x0f, 0x1d },
		{ 0xe9, 0x82 },
		{ 0xea, 0xc7 },
		{ 0xeb, 0x10 },
		{ 0xec, 0xf6 },
	};
	sd->stopped = 1;
	write_regvals(sd, init_fx2, ARRAY_SIZE(init_fx2));
}
/* set the mode */
/* This function works for ov7660 only
 *
 * Programs bridge and sensor registers for the current mode
 * (curr_mode 0 = 320x240, 1 = 640x480), then applies a fixed set of
 * OV7670-style windowing registers to the OV7660.
 */
static void ov519_set_mode(struct sd *sd)
{
	/* indexed by curr_mode: [0] = QVGA, [1] = VGA */
	static const struct ov_regvals bridge_ov7660[2][10] = {
		{{0x10, 0x14}, {0x11, 0x1e}, {0x12, 0x00}, {0x13, 0x00},
		 {0x14, 0x00}, {0x15, 0x00}, {0x16, 0x00}, {0x20, 0x0c},
		 {0x25, 0x01}, {0x26, 0x00}},
		{{0x10, 0x28}, {0x11, 0x3c}, {0x12, 0x00}, {0x13, 0x00},
		 {0x14, 0x00}, {0x15, 0x00}, {0x16, 0x00}, {0x20, 0x0c},
		 {0x25, 0x03}, {0x26, 0x00}}
	};
	static const struct ov_i2c_regvals sensor_ov7660[2][3] = {
		{{0x12, 0x00}, {0x24, 0x00}, {0x0c, 0x0c}},
		{{0x12, 0x00}, {0x04, 0x00}, {0x0c, 0x00}}
	};
	/* mode-independent window registers (OV7670-compatible names) */
	static const struct ov_i2c_regvals sensor_ov7660_2[] = {
		{OV7670_R17_HSTART, 0x13},
		{OV7670_R18_HSTOP, 0x01},
		{OV7670_R32_HREF, 0x92},
		{OV7670_R19_VSTART, 0x02},
		{OV7670_R1A_VSTOP, 0x7a},
		{OV7670_R03_VREF, 0x00},
/*		{0x33, 0x00}, */
/*		{0x34, 0x07}, */
/*		{0x36, 0x00}, */
/*		{0x6b, 0x0a}, */
	};
	write_regvals(sd, bridge_ov7660[sd->gspca_dev.curr_mode],
			ARRAY_SIZE(bridge_ov7660[0]));
	write_i2c_regvals(sd, sensor_ov7660[sd->gspca_dev.curr_mode],
			ARRAY_SIZE(sensor_ov7660[0]));
	write_i2c_regvals(sd, sensor_ov7660_2,
			ARRAY_SIZE(sensor_ov7660_2));
}
/* set the frame rate */
/* This function works for sensors ov7640, ov7648 ov7660 and ov7670 only */
static void ov519_set_fr(struct sd *sd)
{
	/* Thresholds selecting table rows 0..4; slower rates fall
	 * through to row 5 (5 fps). */
	static const int rate_thresholds[5] = { 30, 25, 20, 15, 10 };
	/* frame rate table with indices:
	 *	- row = curr_mode: 0: 320x240, 1: 640x480
	 *	- col = fr rate: 0: 30, 1: 25, 2: 20, 3: 15, 4: 10, 5: 5
	 *	- entry: 0: bridge reg a4, 1: bridge reg 23,
	 *		 2: sensor reg 11 (clock)
	 */
	static const u8 fr_tb[2][6][3] = {
		{{0x04, 0xff, 0x00},
		 {0x04, 0x1f, 0x00},
		 {0x04, 0x1b, 0x00},
		 {0x04, 0x15, 0x00},
		 {0x04, 0x09, 0x00},
		 {0x04, 0x01, 0x00}},
		{{0x0c, 0xff, 0x00},
		 {0x0c, 0x1f, 0x00},
		 {0x0c, 0x1b, 0x00},
		 {0x04, 0xff, 0x01},
		 {0x04, 0x1f, 0x01},
		 {0x04, 0x1b, 0x01}},
	};
	const u8 *entry;
	int row;
	u8 clk;
	/* module parameter overrides the stored rate */
	if (frame_rate > 0)
		sd->frame_rate = frame_rate;
	for (row = 0; row < 5; row++)
		if (sd->frame_rate >= rate_thresholds[row])
			break;
	entry = fr_tb[sd->gspca_dev.curr_mode][row];
	reg_w(sd, 0xa4, entry[0]);
	reg_w(sd, 0x23, entry[1]);
	clk = entry[2];
	if (sd->sensor == SEN_OV7660)
		clk |= 0x80;		/* enable double clock */
	ov518_i2c_w(sd, OV7670_R11_CLKRC, clk);
}
/* Enable/disable automatic gain and exposure (COM8 bits 0 and 2) */
static void setautogain(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 agc_aec;
	agc_aec = val ? 0x05 : 0x00;
	i2c_w_mask(sd, 0x13, agc_aec, 0x05);
}
/* this function is called at probe time
 *
 * Decodes the bridge type and LED polarity from the USB id table entry
 * and installs the default (VGA) mode list for that bridge; the list may
 * later be replaced by sd_init() once the sensor is known.
 */
static int sd_config(struct gspca_dev *gspca_dev,
			const struct usb_device_id *id)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam = &gspca_dev->cam;
	sd->bridge = id->driver_info & BRIDGE_MASK;
	sd->invert_led = (id->driver_info & BRIDGE_INVERT_LED) != 0;
	switch (sd->bridge) {
	case BRIDGE_OV511:
	case BRIDGE_OV511PLUS:
		cam->cam_mode = ov511_vga_mode;
		cam->nmodes = ARRAY_SIZE(ov511_vga_mode);
		break;
	case BRIDGE_OV518:
	case BRIDGE_OV518PLUS:
		cam->cam_mode = ov518_vga_mode;
		cam->nmodes = ARRAY_SIZE(ov518_vga_mode);
		break;
	case BRIDGE_OV519:
	case BRIDGE_OVFX2:
		/* both bridges share the ov519 mode list */
		cam->cam_mode = ov519_vga_mode;
		cam->nmodes = ARRAY_SIZE(ov519_vga_mode);
		if (sd->bridge == BRIDGE_OVFX2) {
			/* the FX2 streams over bulk endpoints */
			cam->bulk_size = OVFX2_BULK_SIZE;
			cam->bulk_nurbs = MAX_NURBS;
			cam->bulk = 1;
		}
		break;
	case BRIDGE_W9968CF:
		cam->cam_mode = w9968cf_vga_mode;
		cam->nmodes = ARRAY_SIZE(w9968cf_vga_mode);
		break;
	}
	sd->frame_rate = 15;	/* default until userspace sets one */
	return 0;
}
/* this function is called at probe and resume time
 *
 * Sequence: configure the bridge, probe which sensor family answers on
 * I2C, swap in the SIF mode list where applicable, then run the
 * sensor-specific register init. Returns gspca_dev->usb_err (0 on
 * success) or -EINVAL when no supported sensor is detected.
 */
static int sd_init(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam = &gspca_dev->cam;
	/* bridge-specific bring-up must happen before sensor probing */
	switch (sd->bridge) {
	case BRIDGE_OV511:
	case BRIDGE_OV511PLUS:
		ov511_configure(gspca_dev);
		break;
	case BRIDGE_OV518:
	case BRIDGE_OV518PLUS:
		ov518_configure(gspca_dev);
		break;
	case BRIDGE_OV519:
		ov519_configure(sd);
		break;
	case BRIDGE_OVFX2:
		ovfx2_configure(sd);
		break;
	case BRIDGE_W9968CF:
		w9968cf_configure(sd);
		break;
	}
	/* The OV519 must be more aggressive about sensor detection since
	 * I2C write will never fail if the sensor is not present. We have
	 * to try to initialize the sensor to detect its presence */
	sd->sensor = -1;
	/* Test for 76xx */
	if (init_ov_sensor(sd, OV7xx0_SID) >= 0) {
		ov7xx0_configure(sd);
	/* Test for 6xx0 */
	} else if (init_ov_sensor(sd, OV6xx0_SID) >= 0) {
		ov6xx0_configure(sd);
	/* Test for 8xx0 */
	} else if (init_ov_sensor(sd, OV8xx0_SID) >= 0) {
		ov8xx0_configure(sd);
	/* Test for 3xxx / 2xxx */
	} else if (init_ov_sensor(sd, OV_HIRES_SID) >= 0) {
		ov_hires_configure(sd);
	} else {
		gspca_err(gspca_dev, "Can't determine sensor slave IDs\n");
		goto error;
	}
	/* the configure helpers may still have failed to identify a
	 * concrete sensor model */
	if (sd->sensor < 0)
		goto error;
	ov51x_led_control(sd, 0);	/* turn LED off */
	/* SIF (lower resolution) sensors get a different mode list */
	switch (sd->bridge) {
	case BRIDGE_OV511:
	case BRIDGE_OV511PLUS:
		if (sd->sif) {
			cam->cam_mode = ov511_sif_mode;
			cam->nmodes = ARRAY_SIZE(ov511_sif_mode);
		}
		break;
	case BRIDGE_OV518:
	case BRIDGE_OV518PLUS:
		if (sd->sif) {
			cam->cam_mode = ov518_sif_mode;
			cam->nmodes = ARRAY_SIZE(ov518_sif_mode);
		}
		break;
	case BRIDGE_OV519:
		if (sd->sif) {
			cam->cam_mode = ov519_sif_mode;
			cam->nmodes = ARRAY_SIZE(ov519_sif_mode);
		}
		break;
	case BRIDGE_OVFX2:
		/* high-res sensors on the FX2 have their own mode lists */
		switch (sd->sensor) {
		case SEN_OV2610:
		case SEN_OV2610AE:
			cam->cam_mode = ovfx2_ov2610_mode;
			cam->nmodes = ARRAY_SIZE(ovfx2_ov2610_mode);
			break;
		case SEN_OV3610:
			cam->cam_mode = ovfx2_ov3610_mode;
			cam->nmodes = ARRAY_SIZE(ovfx2_ov3610_mode);
			break;
		case SEN_OV9600:
			cam->cam_mode = ovfx2_ov9600_mode;
			cam->nmodes = ARRAY_SIZE(ovfx2_ov9600_mode);
			break;
		default:
			if (sd->sif) {
				cam->cam_mode = ov519_sif_mode;
				cam->nmodes = ARRAY_SIZE(ov519_sif_mode);
			}
			break;
		}
		break;
	case BRIDGE_W9968CF:
		if (sd->sif)
			cam->nmodes = ARRAY_SIZE(w9968cf_vga_mode) - 1;
		/* w9968cf needs initialisation once the sensor is known */
		w9968cf_init(sd);
		break;
	}
	/* initialize the sensor */
	switch (sd->sensor) {
	case SEN_OV2610:
		write_i2c_regvals(sd, norm_2610, ARRAY_SIZE(norm_2610));
		/* Enable autogain, autoexpo, awb, bandfilter */
		i2c_w_mask(sd, 0x13, 0x27, 0x27);
		break;
	case SEN_OV2610AE:
		write_i2c_regvals(sd, norm_2610ae, ARRAY_SIZE(norm_2610ae));
		/* enable autoexpo */
		i2c_w_mask(sd, 0x13, 0x05, 0x05);
		break;
	case SEN_OV3610:
		write_i2c_regvals(sd, norm_3620b, ARRAY_SIZE(norm_3620b));
		/* Enable autogain, autoexpo, awb, bandfilter */
		i2c_w_mask(sd, 0x13, 0x27, 0x27);
		break;
	case SEN_OV6620:
		write_i2c_regvals(sd, norm_6x20, ARRAY_SIZE(norm_6x20));
		break;
	case SEN_OV6630:
	case SEN_OV66308AF:
		write_i2c_regvals(sd, norm_6x30, ARRAY_SIZE(norm_6x30));
		break;
	default:
	/* NOTE: this default sits mid-switch on purpose; it covers the
	 * two commented-out cases below */
/*	case SEN_OV7610: */
/*	case SEN_OV76BE: */
		write_i2c_regvals(sd, norm_7610, ARRAY_SIZE(norm_7610));
		i2c_w_mask(sd, 0x0e, 0x00, 0x40);
		break;
	case SEN_OV7620:
	case SEN_OV7620AE:
		write_i2c_regvals(sd, norm_7620, ARRAY_SIZE(norm_7620));
		break;
	case SEN_OV7640:
	case SEN_OV7648:
		write_i2c_regvals(sd, norm_7640, ARRAY_SIZE(norm_7640));
		break;
	case SEN_OV7660:
		i2c_w(sd, OV7670_R12_COM7, OV7670_COM7_RESET);
		msleep(14);
		reg_w(sd, OV519_R57_SNAPSHOT, 0x23);
		write_regvals(sd, init_519_ov7660,
				ARRAY_SIZE(init_519_ov7660));
		write_i2c_regvals(sd, norm_7660, ARRAY_SIZE(norm_7660));
		sd->gspca_dev.curr_mode = 1;	/* 640x480 */
		ov519_set_mode(sd);
		ov519_set_fr(sd);
		sd_reset_snapshot(gspca_dev);
		ov51x_restart(sd);
		ov51x_stop(sd);			/* not in win traces */
		ov51x_led_control(sd, 0);
		break;
	case SEN_OV7670:
		write_i2c_regvals(sd, norm_7670, ARRAY_SIZE(norm_7670));
		break;
	case SEN_OV8610:
		write_i2c_regvals(sd, norm_8610, ARRAY_SIZE(norm_8610));
		break;
	case SEN_OV9600:
		write_i2c_regvals(sd, norm_9600, ARRAY_SIZE(norm_9600));
		/* enable autoexpo */
/*		i2c_w_mask(sd, 0x13, 0x05, 0x05); */
		break;
	}
	return gspca_dev->usb_err;
error:
	gspca_err(gspca_dev, "OV519 Config failed\n");
	return -EINVAL;
}
/* function called at start time before URB creation
 *
 * Only the OVFX2 bridge needs work here: pick the bulk transfer size
 * for the mode about to be streamed (800-wide modes use a smaller one).
 */
static int sd_isoc_init(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	if (sd->bridge == BRIDGE_OVFX2) {
		if (gspca_dev->pixfmt.width == 800)
			gspca_dev->cam.bulk_size = 7 * 4096;
		else
			gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE;
	}
	return 0;
}
/* Set up the OV511/OV511+ with the given image parameters.
 *
 * Programs FIFO/packet sizing, image geometry, framerate (clock
 * divider) and decides whether compression must be enabled for the
 * available isoc bandwidth. Errors are reported via
 * sd->gspca_dev.usb_err.
 *
 * Do not put any sensor-specific code in here (including I2C I/O functions)
 */
static void ov511_mode_init_regs(struct sd *sd)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int hsegs, vsegs, packet_size, fps, needed;
	int interlaced = 0;
	struct usb_host_interface *alt;
	struct usb_interface *intf;
	intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
	alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
	if (!alt) {
		gspca_err(gspca_dev, "Couldn't get altsetting\n");
		sd->gspca_dev.usb_err = -EIO;
		return;
	}
	/* A malicious or buggy device may expose an altsetting with no
	 * endpoints; without this check alt->endpoint[0] below reads
	 * past the end of the (empty) endpoint array. */
	if (alt->desc.bNumEndpoints < 1) {
		sd->gspca_dev.usb_err = -ENODEV;
		return;
	}
	packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
	reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5);
	reg_w(sd, R511_CAM_UV_EN, 0x01);
	reg_w(sd, R511_SNAP_UV_EN, 0x01);
	reg_w(sd, R511_SNAP_OPTS, 0x03);
	/* Here I'm assuming that snapshot size == image size.
	 * I hope that's always true. --claudio
	 */
	hsegs = (sd->gspca_dev.pixfmt.width >> 3) - 1;
	vsegs = (sd->gspca_dev.pixfmt.height >> 3) - 1;
	reg_w(sd, R511_CAM_PXCNT, hsegs);
	reg_w(sd, R511_CAM_LNCNT, vsegs);
	reg_w(sd, R511_CAM_PXDIV, 0x00);
	reg_w(sd, R511_CAM_LNDIV, 0x00);
	/* YUV420, low pass filter on */
	reg_w(sd, R511_CAM_OPTS, 0x03);
	/* Snapshot additions */
	reg_w(sd, R511_SNAP_PXCNT, hsegs);
	reg_w(sd, R511_SNAP_LNCNT, vsegs);
	reg_w(sd, R511_SNAP_PXDIV, 0x00);
	reg_w(sd, R511_SNAP_LNDIV, 0x00);
	/******** Set the framerate ********/
	if (frame_rate > 0)
		sd->frame_rate = frame_rate;
	switch (sd->sensor) {
	case SEN_OV6620:
		/* No framerate control, doesn't like higher rates yet */
		sd->clockdiv = 3;
		break;
	/* Note once the FIXME's in mode_init_ov_sensor_regs() are fixed
	   for more sensors we need to do this for them too */
	case SEN_OV7620:
	case SEN_OV7620AE:
	case SEN_OV7640:
	case SEN_OV7648:
	case SEN_OV76BE:
		if (sd->gspca_dev.pixfmt.width == 320)
			interlaced = 1;
		/* Fall through */
	case SEN_OV6630:
	case SEN_OV7610:
	case SEN_OV7670:
		switch (sd->frame_rate) {
		case 30:
		case 25:
			/* Not enough bandwidth to do 640x480 @ 30 fps */
			if (sd->gspca_dev.pixfmt.width != 640) {
				sd->clockdiv = 0;
				break;
			}
			/* For 640x480 case */
			/* fall through */
		default:
/*		case 20: */
/*		case 15: */
			sd->clockdiv = 1;
			break;
		case 10:
			sd->clockdiv = 2;
			break;
		case 5:
			sd->clockdiv = 5;
			break;
		}
		if (interlaced) {
			sd->clockdiv = (sd->clockdiv + 1) * 2 - 1;
			/* Higher then 10 does not work */
			if (sd->clockdiv > 10)
				sd->clockdiv = 10;
		}
		break;
	case SEN_OV8610:
		/* No framerate control ?? */
		sd->clockdiv = 0;
		break;
	}
	/* Check if we have enough bandwidth to disable compression */
	fps = (interlaced ? 60 : 30) / (sd->clockdiv + 1) + 1;
	needed = fps * sd->gspca_dev.pixfmt.width *
			sd->gspca_dev.pixfmt.height * 3 / 2;
	/* 1000 isoc packets/sec */
	if (needed > 1000 * packet_size) {
		/* Enable Y and UV quantization and compression */
		reg_w(sd, R511_COMP_EN, 0x07);
		reg_w(sd, R511_COMP_LUT_EN, 0x03);
	} else {
		reg_w(sd, R511_COMP_EN, 0x06);
		reg_w(sd, R511_COMP_LUT_EN, 0x00);
	}
	reg_w(sd, R51x_SYS_RESET, OV511_RESET_OMNICE);
	reg_w(sd, R51x_SYS_RESET, 0);
}
/* Sets up the OV518/OV518+ with the given image parameters
 *
 * OV518 needs a completely different approach, until we can figure out what
 * the individual registers do. Also, only 15 FPS is supported now.
 *
 * Errors are reported via sd->gspca_dev.usb_err.
 *
 * Do not put any sensor-specific code in here (including I2C I/O functions)
 */
static void ov518_mode_init_regs(struct sd *sd)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int hsegs, vsegs, packet_size;
	struct usb_host_interface *alt;
	struct usb_interface *intf;
	intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface);
	alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
	if (!alt) {
		gspca_err(gspca_dev, "Couldn't get altsetting\n");
		sd->gspca_dev.usb_err = -EIO;
		return;
	}
	/* A malicious or buggy device may expose an altsetting with no
	 * endpoints; without this check alt->endpoint[0] below reads
	 * past the end of the (empty) endpoint array. */
	if (alt->desc.bNumEndpoints < 1) {
		sd->gspca_dev.usb_err = -ENODEV;
		return;
	}
	packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
	ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2);
	/******** Set the mode ********/
	reg_w(sd, 0x2b, 0);
	reg_w(sd, 0x2c, 0);
	reg_w(sd, 0x2d, 0);
	reg_w(sd, 0x2e, 0);
	reg_w(sd, 0x3b, 0);
	reg_w(sd, 0x3c, 0);
	reg_w(sd, 0x3d, 0);
	reg_w(sd, 0x3e, 0);
	if (sd->bridge == BRIDGE_OV518) {
		/* Set 8-bit (YVYU) input format */
		reg_w_mask(sd, 0x20, 0x08, 0x08);
		/* Set 12-bit (4:2:0) output format */
		reg_w_mask(sd, 0x28, 0x80, 0xf0);
		reg_w_mask(sd, 0x38, 0x80, 0xf0);
	} else {
		reg_w(sd, 0x28, 0x80);
		reg_w(sd, 0x38, 0x80);
	}
	hsegs = sd->gspca_dev.pixfmt.width / 16;
	vsegs = sd->gspca_dev.pixfmt.height / 4;
	reg_w(sd, 0x29, hsegs);
	reg_w(sd, 0x2a, vsegs);
	reg_w(sd, 0x39, hsegs);
	reg_w(sd, 0x3a, vsegs);
	/* Windows driver does this here; who knows why */
	reg_w(sd, 0x2f, 0x80);
	/******** Set the framerate ********/
	if (sd->bridge == BRIDGE_OV518PLUS && sd->revision == 0 &&
					      sd->sensor == SEN_OV7620AE)
		sd->clockdiv = 0;
	else
		sd->clockdiv = 1;
	/* Mode independent, but framerate dependent, regs */
	/* 0x51: Clock divider; Only works on some cams which use 2 crystals */
	reg_w(sd, 0x51, 0x04);
	reg_w(sd, 0x22, 0x18);
	reg_w(sd, 0x23, 0xff);
	if (sd->bridge == BRIDGE_OV518PLUS) {
		switch (sd->sensor) {
		case SEN_OV7620AE:
			/*
			 * HdG: 640x480 needs special handling on device
			 * revision 2, we check for device revision > 0 to
			 * avoid regressions, as we don't know the correct
			 * thing todo for revision 1.
			 *
			 * Also this likely means we don't need to
			 * differentiate between the OV7620 and OV7620AE,
			 * earlier testing hitting this same problem likely
			 * happened to be with revision < 2 cams using an
			 * OV7620 and revision 2 cams using an OV7620AE.
			 */
			if (sd->revision > 0 &&
					sd->gspca_dev.pixfmt.width == 640) {
				reg_w(sd, 0x20, 0x60);
				reg_w(sd, 0x21, 0x1f);
			} else {
				reg_w(sd, 0x20, 0x00);
				reg_w(sd, 0x21, 0x19);
			}
			break;
		case SEN_OV7620:
			reg_w(sd, 0x20, 0x00);
			reg_w(sd, 0x21, 0x19);
			break;
		default:
			reg_w(sd, 0x21, 0x19);
		}
	} else
		reg_w(sd, 0x71, 0x17);	/* Compression-related? */
	/* FIXME: Sensor-specific */
	/* Bit 5 is what matters here. Of course, it is "reserved" */
	i2c_w(sd, 0x54, 0x23);
	reg_w(sd, 0x2f, 0x80);
	if (sd->bridge == BRIDGE_OV518PLUS) {
		reg_w(sd, 0x24, 0x94);
		reg_w(sd, 0x25, 0x90);
		ov518_reg_w32(sd, 0xc4,    400, 2);	/* 190h   */
		ov518_reg_w32(sd, 0xc6,    540, 2);	/* 21ch   */
		ov518_reg_w32(sd, 0xc7,    540, 2);	/* 21ch   */
		ov518_reg_w32(sd, 0xc8,    108, 2);	/* 6ch    */
		ov518_reg_w32(sd, 0xca, 131098, 3);	/* 2001ah */
		ov518_reg_w32(sd, 0xcb,    532, 2);	/* 214h   */
		ov518_reg_w32(sd, 0xcc,   2400, 2);	/* 960h   */
		ov518_reg_w32(sd, 0xcd,     32, 2);	/* 20h    */
		ov518_reg_w32(sd, 0xce,    608, 2);	/* 260h   */
	} else {
		reg_w(sd, 0x24, 0x9f);
		reg_w(sd, 0x25, 0x90);
		ov518_reg_w32(sd, 0xc4,    400, 2);	/* 190h   */
		ov518_reg_w32(sd, 0xc6,    381, 2);	/* 17dh   */
		ov518_reg_w32(sd, 0xc7,    381, 2);	/* 17dh   */
		ov518_reg_w32(sd, 0xc8,    128, 2);	/* 80h    */
		ov518_reg_w32(sd, 0xca, 183331, 3);	/* 2cc23h */
		ov518_reg_w32(sd, 0xcb,    746, 2);	/* 2eah   */
		ov518_reg_w32(sd, 0xcc,   1750, 2);	/* 6d6h   */
		ov518_reg_w32(sd, 0xcd,     45, 2);	/* 2dh    */
		ov518_reg_w32(sd, 0xce,    851, 2);	/* 353h   */
	}
	reg_w(sd, 0x2f, 0x80);
}
/* Sets up the OV519 with the given image parameters
 *
 * OV519 needs a completely different approach, until we can figure out what
 * the individual registers do.
 *
 * Programs the bridge mode registers, image size/offsets, output format
 * and the per-sensor framerate registers; for OV7660 everything is
 * delegated to ov519_set_mode()/ov519_set_fr().
 *
 * Do not put any sensor-specific code in here (including I2C I/O functions)
 */
static void ov519_mode_init_regs(struct sd *sd)
{
	static const struct ov_regvals mode_init_519_ov7670[] = {
		{ 0x5d,	0x03 }, /* Turn off suspend mode */
		{ 0x53,	0x9f }, /* was 9b in 1.65-1.08 */
		{ OV519_R54_EN_CLK1, 0x0f }, /* bit2 (jpeg enable) */
		{ 0xa2,	0x20 }, /* a2-a5 are undocumented */
		{ 0xa3,	0x18 },
		{ 0xa4,	0x04 },
		{ 0xa5,	0x28 },
		{ 0x37,	0x00 },	/* SetUsbInit */
		{ 0x55,	0x02 }, /* 4.096 Mhz audio clock */
		/* Enable both fields, YUV Input, disable defect comp (why?) */
		{ 0x20,	0x0c },
		{ 0x21,	0x38 },
		{ 0x22,	0x1d },
		{ 0x17,	0x50 }, /* undocumented */
		{ 0x37,	0x00 }, /* undocumented */
		{ 0x40,	0xff }, /* I2C timeout counter */
		{ 0x46,	0x00 }, /* I2C clock prescaler */
		{ 0x59,	0x04 },	/* new from windrv 090403 */
		{ 0xff,	0x00 }, /* undocumented */
		/* windows reads 0x55 at this point, why? */
	};
	static const struct ov_regvals mode_init_519[] = {
		{ 0x5d,	0x03 }, /* Turn off suspend mode */
		{ 0x53,	0x9f }, /* was 9b in 1.65-1.08 */
		{ OV519_R54_EN_CLK1, 0x0f }, /* bit2 (jpeg enable) */
		{ 0xa2,	0x20 }, /* a2-a5 are undocumented */
		{ 0xa3,	0x18 },
		{ 0xa4,	0x04 },
		{ 0xa5,	0x28 },
		{ 0x37,	0x00 },	/* SetUsbInit */
		{ 0x55,	0x02 }, /* 4.096 Mhz audio clock */
		/* Enable both fields, YUV Input, disable defect comp (why?) */
		{ 0x22,	0x1d },
		{ 0x17,	0x50 }, /* undocumented */
		{ 0x37,	0x00 }, /* undocumented */
		{ 0x40,	0xff }, /* I2C timeout counter */
		{ 0x46,	0x00 }, /* I2C clock prescaler */
		{ 0x59,	0x04 },	/* new from windrv 090403 */
		{ 0xff,	0x00 }, /* undocumented */
		/* windows reads 0x55 at this point, why? */
	};
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	/******** Set the mode ********/
	switch (sd->sensor) {
	default:
		write_regvals(sd, mode_init_519, ARRAY_SIZE(mode_init_519));
		if (sd->sensor == SEN_OV7640 ||
		    sd->sensor == SEN_OV7648) {
			/* Select 8-bit input mode */
			reg_w_mask(sd, OV519_R20_DFR, 0x10, 0x10);
		}
		break;
	case SEN_OV7660:
		return;		/* done by ov519_set_mode/fr() */
	case SEN_OV7670:
		write_regvals(sd, mode_init_519_ov7670,
				ARRAY_SIZE(mode_init_519_ov7670));
		break;
	}
	/* image size in units of 16x8 pixel blocks */
	reg_w(sd, OV519_R10_H_SIZE, sd->gspca_dev.pixfmt.width >> 4);
	reg_w(sd, OV519_R11_V_SIZE, sd->gspca_dev.pixfmt.height >> 3);
	if (sd->sensor == SEN_OV7670 &&
	    sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv)
		reg_w(sd, OV519_R12_X_OFFSETL, 0x04);
	else if (sd->sensor == SEN_OV7648 &&
	    sd->gspca_dev.cam.cam_mode[sd->gspca_dev.curr_mode].priv)
		reg_w(sd, OV519_R12_X_OFFSETL, 0x01);
	else
		reg_w(sd, OV519_R12_X_OFFSETL, 0x00);
	reg_w(sd, OV519_R13_X_OFFSETH,	0x00);
	reg_w(sd, OV519_R14_Y_OFFSETL,	0x00);
	reg_w(sd, OV519_R15_Y_OFFSETH,	0x00);
	reg_w(sd, OV519_R16_DIVIDER,	0x00);
	reg_w(sd, OV519_R25_FORMAT,	0x03); /* YUV422 */
	reg_w(sd, 0x26,			0x00); /* Undocumented */
	/******** Set the framerate ********/
	if (frame_rate > 0)
		sd->frame_rate = frame_rate;
	/* FIXME: These are only valid at the max resolution. */
	sd->clockdiv = 0;
	switch (sd->sensor) {
	case SEN_OV7640:
	case SEN_OV7648:
		switch (sd->frame_rate) {
		default:
/*		case 30: */
			reg_w(sd, 0xa4, 0x0c);
			reg_w(sd, 0x23, 0xff);
			break;
		case 25:
			reg_w(sd, 0xa4, 0x0c);
			reg_w(sd, 0x23, 0x1f);
			break;
		case 20:
			reg_w(sd, 0xa4, 0x0c);
			reg_w(sd, 0x23, 0x1b);
			break;
		case 15:
			reg_w(sd, 0xa4, 0x04);
			reg_w(sd, 0x23, 0xff);
			sd->clockdiv = 1;
			break;
		case 10:
			reg_w(sd, 0xa4, 0x04);
			reg_w(sd, 0x23, 0x1f);
			sd->clockdiv = 1;
			break;
		case 5:
			reg_w(sd, 0xa4, 0x04);
			reg_w(sd, 0x23, 0x1b);
			sd->clockdiv = 1;
			break;
		}
		break;
	case SEN_OV8610:
		switch (sd->frame_rate) {
		default:	/* 15 fps */
/*		case 15: */
			reg_w(sd, 0xa4, 0x06);
			reg_w(sd, 0x23, 0xff);
			break;
		case 10:
			reg_w(sd, 0xa4, 0x06);
			reg_w(sd, 0x23, 0x1f);
			break;
		case 5:
			reg_w(sd, 0xa4, 0x06);
			reg_w(sd, 0x23, 0x1b);
			break;
		}
		break;
	case SEN_OV7670:		/* guesses, based on 7640 */
		gspca_dbg(gspca_dev, D_STREAM, "Setting framerate to %d fps\n",
			  (sd->frame_rate == 0) ? 15 : sd->frame_rate);
		reg_w(sd, 0xa4, 0x10);
		switch (sd->frame_rate) {
		case 30:
			reg_w(sd, 0x23, 0xff);
			break;
		case 20:
			reg_w(sd, 0x23, 0x1b);
			break;
		default:
/*		case 15: */
			reg_w(sd, 0x23, 0xff);
			sd->clockdiv = 1;
			break;
		}
		break;
	}
}
/* Program the sensor's mode-dependent registers (VGA vs QVGA style
 * scaling, windowing on the high-res sensors, clock divider). Sensors
 * that handle everything here return early; the rest fall through to
 * the common clock programming at the bottom. */
static void mode_init_ov_sensor_regs(struct sd *sd)
{
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;
	int qvga, xstart, xend, ystart, yend;
	u8 v;
	/* bit 0 of the mode's priv field selects the smaller resolution */
	qvga = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv & 1;
	/******** Mode (VGA/QVGA) and sensor specific regs ********/
	switch (sd->sensor) {
	case SEN_OV2610:
		i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
		i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
		i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a);
		i2c_w(sd, 0x25, qvga ? 0x30 : 0x60);
		i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40);
		i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0);
		i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20);
		return;
	case SEN_OV2610AE: {
		/* NOTE(review): this declaration shadows the outer 'v';
		 * harmless but worth cleaning up someday */
		u8 v;
		/* frame rates:
		 *	10fps / 5 fps for 1600x1200
		 *	40fps / 20fps for 800x600
		 */
		/* NOTE(review): 80 here is decimal (0x50) while the slow
		 * setting below is 0x81 — possibly 0x80 was intended;
		 * matches the upstream driver, so left as-is */
		v = 80;
		if (qvga) {
			if (sd->frame_rate < 25)
				v = 0x81;
		} else {
			if (sd->frame_rate < 10)
				v = 0x81;
		}
		i2c_w(sd, 0x11, v);
		i2c_w(sd, 0x12, qvga ? 0x60 : 0x20);
		return;
	    }
	case SEN_OV3610:
		/* center the capture window within the sensor array */
		if (qvga) {
			xstart = (1040 - gspca_dev->pixfmt.width) / 2 +
				(0x1f << 4);
			ystart = (776 - gspca_dev->pixfmt.height) / 2;
		} else {
			xstart = (2076 - gspca_dev->pixfmt.width) / 2 +
				(0x10 << 4);
			ystart = (1544 - gspca_dev->pixfmt.height) / 2;
		}
		xend = xstart + gspca_dev->pixfmt.width;
		yend = ystart + gspca_dev->pixfmt.height;
		/* Writing to the COMH register resets the other windowing regs
		   to their default values, so we must do this first. */
		i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0xf0);
		i2c_w_mask(sd, 0x32,
			   (((xend >> 1) & 7) << 3) | ((xstart >> 1) & 7),
			   0x3f);
		i2c_w_mask(sd, 0x03,
			   (((yend >> 1) & 3) << 2) | ((ystart >> 1) & 3),
			   0x0f);
		i2c_w(sd, 0x17, xstart >> 4);
		i2c_w(sd, 0x18, xend >> 4);
		i2c_w(sd, 0x19, ystart >> 3);
		i2c_w(sd, 0x1a, yend >> 3);
		return;
	case SEN_OV8610:
		/* For OV8610 qvga means qsvga */
		i2c_w_mask(sd, OV7610_REG_COM_C, qvga ? (1 << 5) : 0, 1 << 5);
		i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */
		i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
		i2c_w_mask(sd, 0x2d, 0x00, 0x40); /* from windrv 090403 */
		i2c_w_mask(sd, 0x28, 0x20, 0x20); /* progressive mode on */
		break;
	case SEN_OV7610:
		i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
		i2c_w(sd, 0x35, qvga ? 0x1e : 0x9e);
		i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */
		i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
		break;
	case SEN_OV7620:
	case SEN_OV7620AE:
	case SEN_OV76BE:
		i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
		i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
		i2c_w(sd, 0x24, qvga ? 0x20 : 0x3a);
		i2c_w(sd, 0x25, qvga ? 0x30 : 0x60);
		i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40);
		i2c_w_mask(sd, 0x67, qvga ? 0xb0 : 0x90, 0xf0);
		i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20);
		i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */
		i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
		if (sd->sensor == SEN_OV76BE)
			i2c_w(sd, 0x35, qvga ? 0x1e : 0x9e);
		break;
	case SEN_OV7640:
	case SEN_OV7648:
		i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
		i2c_w_mask(sd, 0x28, qvga ? 0x00 : 0x20, 0x20);
		/* Setting this undocumented bit in qvga mode removes a very
		   annoying vertical shaking of the image */
		i2c_w_mask(sd, 0x2d, qvga ? 0x40 : 0x00, 0x40);
		/* Unknown */
		i2c_w_mask(sd, 0x67, qvga ? 0xf0 : 0x90, 0xf0);
		/* Allow higher automatic gain (to allow higher framerates) */
		i2c_w_mask(sd, 0x74, qvga ? 0x20 : 0x00, 0x20);
		i2c_w_mask(sd, 0x12, 0x04, 0x04); /* AWB: 1 */
		break;
	case SEN_OV7670:
		/* set COM7_FMT_VGA or COM7_FMT_QVGA
		 * do we need to set anything else?
		 *	HSTART etc are set in set_ov_sensor_window itself */
		i2c_w_mask(sd, OV7670_R12_COM7,
			 qvga ? OV7670_COM7_FMT_QVGA : OV7670_COM7_FMT_VGA,
			 OV7670_COM7_FMT_MASK);
		i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */
		i2c_w_mask(sd, OV7670_R13_COM8, OV7670_COM8_AWB,
				OV7670_COM8_AWB);
		if (qvga) {		/* QVGA from ov7670.c by
					 * Jonathan Corbet */
			xstart = 164;
			xend = 28;
			ystart = 14;
			yend = 494;
		} else {		/* VGA */
			xstart = 158;
			xend = 14;
			ystart = 10;
			yend = 490;
		}
		/* OV7670 hardware window registers are split across
		 * multiple locations */
		i2c_w(sd, OV7670_R17_HSTART, xstart >> 3);
		i2c_w(sd, OV7670_R18_HSTOP, xend >> 3);
		v = i2c_r(sd, OV7670_R32_HREF);
		v = (v & 0xc0) | ((xend & 0x7) << 3) | (xstart & 0x07);
		msleep(10);	/* need to sleep between read and write to
				 * same reg! */
		i2c_w(sd, OV7670_R32_HREF, v);
		i2c_w(sd, OV7670_R19_VSTART, ystart >> 2);
		i2c_w(sd, OV7670_R1A_VSTOP, yend >> 2);
		v = i2c_r(sd, OV7670_R03_VREF);
		v = (v & 0xc0) | ((yend & 0x3) << 2) | (ystart & 0x03);
		msleep(10);	/* need to sleep between read and write to
				 * same reg! */
		i2c_w(sd, OV7670_R03_VREF, v);
		break;
	case SEN_OV6620:
		i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
		i2c_w_mask(sd, 0x13, 0x00, 0x20); /* Select 16 bit data bus */
		i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
		break;
	case SEN_OV6630:
	case SEN_OV66308AF:
		i2c_w_mask(sd, 0x14, qvga ? 0x20 : 0x00, 0x20);
		i2c_w_mask(sd, 0x12, 0x04, 0x06); /* AWB: 1 Test pattern: 0 */
		break;
	case SEN_OV9600: {
		const struct ov_i2c_regvals *vals;
		static const struct ov_i2c_regvals sxga_15[] = {
			{0x11, 0x80}, {0x14, 0x3e}, {0x24, 0x85}, {0x25, 0x75}
		};
		static const struct ov_i2c_regvals sxga_7_5[] = {
			{0x11, 0x81}, {0x14, 0x3e}, {0x24, 0x85}, {0x25, 0x75}
		};
		static const struct ov_i2c_regvals vga_30[] = {
			{0x11, 0x81}, {0x14, 0x7e}, {0x24, 0x70}, {0x25, 0x60}
		};
		static const struct ov_i2c_regvals vga_15[] = {
			{0x11, 0x83}, {0x14, 0x3e}, {0x24, 0x80}, {0x25, 0x70}
		};
		/* frame rates:
		 *	15fps / 7.5 fps for 1280x1024
		 *	30fps / 15fps for 640x480
		 */
		i2c_w_mask(sd, 0x12, qvga ? 0x40 : 0x00, 0x40);
		if (qvga)
			vals = sd->frame_rate < 30 ? vga_15 : vga_30;
		else
			vals = sd->frame_rate < 15 ? sxga_7_5 : sxga_15;
		/* all four tables have the same length */
		write_i2c_regvals(sd, vals, ARRAY_SIZE(sxga_15));
		return;
	    }
	default:
		return;
	}
	/******** Clock programming ********/
	i2c_w(sd, 0x11, sd->clockdiv);
}
/* this function works for bridge ov519 and sensors ov7660 and ov7670 only
 *
 * Updates the sensor's mirror/vflip bits; if currently streaming, the
 * bridge stream is blocked around the I2C write and restarted after.
 */
static void sethvflip(struct gspca_dev *gspca_dev, s32 hflip, s32 vflip)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int streaming = sd->gspca_dev.streaming;
	u8 flip_bits = 0;
	if (hflip)
		flip_bits |= OV7670_MVFP_MIRROR;
	if (vflip)
		flip_bits |= OV7670_MVFP_VFLIP;
	if (streaming)
		reg_w(sd, OV519_R51_RESET1, 0x0f);	/* block stream */
	i2c_w_mask(sd, OV7670_R1E_MVFP, flip_bits,
		   OV7670_MVFP_MIRROR | OV7670_MVFP_VFLIP);
	if (streaming)
		reg_w(sd, OV519_R51_RESET1, 0x00);	/* restart stream */
}
/* Program the sensor capture window (start/end bases plus size scaled
 * by the mode), after running the mode-dependent register init.
 * Sensors whose windowing is fully handled elsewhere return early. */
static void set_ov_sensor_window(struct sd *sd)
{
	struct gspca_dev *gspca_dev;
	int qvga, crop;
	int hwsbase, hwebase, vwsbase, vwebase, hwscale, vwscale;
	/* mode setup is fully handled in mode_init_ov_sensor_regs for these */
	switch (sd->sensor) {
	case SEN_OV2610:
	case SEN_OV2610AE:
	case SEN_OV3610:
	case SEN_OV7670:
	case SEN_OV9600:
		mode_init_ov_sensor_regs(sd);
		return;
	case SEN_OV7660:
		ov519_set_mode(sd);
		ov519_set_fr(sd);
		return;
	}
	gspca_dev = &sd->gspca_dev;
	/* priv bit 0 = reduced resolution, bit 1 = cropped mode */
	qvga = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv & 1;
	crop = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv & 2;
	/* The different sensor ICs handle setting up of window differently.
	 * IF YOU SET IT WRONG, YOU WILL GET ALL ZERO ISOC DATA FROM OV51x!! */
	switch (sd->sensor) {
	case SEN_OV8610:
		hwsbase = 0x1e;
		hwebase = 0x1e;
		vwsbase = 0x02;
		vwebase = 0x02;
		break;
	case SEN_OV7610:
	case SEN_OV76BE:
		hwsbase = 0x38;
		hwebase = 0x3a;
		vwsbase = vwebase = 0x05;
		break;
	case SEN_OV6620:
	case SEN_OV6630:
	case SEN_OV66308AF:
		hwsbase = 0x38;
		hwebase = 0x3a;
		vwsbase = 0x05;
		vwebase = 0x06;
		if (sd->sensor == SEN_OV66308AF && qvga)
			/* HDG: this fixes U and V getting swapped */
			hwsbase++;
		if (crop) {
			hwsbase += 8;
			hwebase += 8;
			vwsbase += 11;
			vwebase += 11;
		}
		break;
	case SEN_OV7620:
	case SEN_OV7620AE:
		hwsbase = 0x2f;		/* From 7620.SET (spec is wrong) */
		hwebase = 0x2f;
		vwsbase = vwebase = 0x05;
		break;
	case SEN_OV7640:
	case SEN_OV7648:
		hwsbase = 0x1a;
		hwebase = 0x1a;
		vwsbase = vwebase = 0x03;
		break;
	default:
		return;
	}
	/* scale factors: window is sized in sensor-specific units */
	switch (sd->sensor) {
	case SEN_OV6620:
	case SEN_OV6630:
	case SEN_OV66308AF:
		if (qvga) {		/* QCIF */
			hwscale = 0;
			vwscale = 0;
		} else {		/* CIF */
			hwscale = 1;
			vwscale = 1;	/* The datasheet says 0;
					 * it's wrong */
		}
		break;
	case SEN_OV8610:
		if (qvga) {		/* QSVGA */
			hwscale = 1;
			vwscale = 1;
		} else {		/* SVGA */
			hwscale = 2;
			vwscale = 2;
		}
		break;
	default:			/* SEN_OV7xx0 */
		if (qvga) {		/* QVGA */
			hwscale = 1;
			vwscale = 0;
		} else {		/* VGA */
			hwscale = 2;
			vwscale = 1;
		}
	}
	mode_init_ov_sensor_regs(sd);
	/* window start/end registers (0x17-0x1a on all these sensors) */
	i2c_w(sd, 0x17, hwsbase);
	i2c_w(sd, 0x18, hwebase + (sd->sensor_width >> hwscale));
	i2c_w(sd, 0x19, vwsbase);
	i2c_w(sd, 0x1a, vwebase + (sd->sensor_height >> vwscale));
}
/* -- start the camera --
 *
 * Order matters: bridge mode registers first (they may adjust the
 * sensor_width/height defaults), then the sensor window, then snapshot
 * reset and stream restart. Returns gspca_dev->usb_err.
 */
static int sd_start(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	/* Default for most bridges, allow bridge_mode_init_regs to override */
	sd->sensor_width = sd->gspca_dev.pixfmt.width;
	sd->sensor_height = sd->gspca_dev.pixfmt.height;
	switch (sd->bridge) {
	case BRIDGE_OV511:
	case BRIDGE_OV511PLUS:
		ov511_mode_init_regs(sd);
		break;
	case BRIDGE_OV518:
	case BRIDGE_OV518PLUS:
		ov518_mode_init_regs(sd);
		break;
	case BRIDGE_OV519:
		ov519_mode_init_regs(sd);
		break;
	/* case BRIDGE_OVFX2: nothing to do */
	case BRIDGE_W9968CF:
		w9968cf_mode_init_regs(sd);
		break;
	}
	set_ov_sensor_window(sd);
	/* Force clear snapshot state in case the snapshot button was
	   pressed while we weren't streaming */
	sd->snapshot_needs_reset = 1;
	sd_reset_snapshot(gspca_dev);
	/* skip the first frames, which may be garbage */
	sd->first_frame = 3;
	ov51x_restart(sd);
	ov51x_led_control(sd, 1);
	return gspca_dev->usb_err;
}
/* Stop streaming: halt the bridge and switch the LED off */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	struct sd *dev = (struct sd *) gspca_dev;
	ov51x_stop(dev);
	ov51x_led_control(dev, 0);
}
/* Final stream teardown, called with the device possibly already gone.
 * Releases a still-pressed snapshot button and does bridge-specific
 * cleanup (W9968CF stop, OV519 snapshot register). */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
	struct sd *dev = (struct sd *) gspca_dev;
	if (!dev->gspca_dev.present)
		return;
	if (dev->bridge == BRIDGE_W9968CF)
		w9968cf_stop0(dev);
#if IS_ENABLED(CONFIG_INPUT)
	/* If the last button state is pressed, release it now! */
	if (dev->snapshot_pressed) {
		input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0);
		input_sync(gspca_dev->input_dev);
		dev->snapshot_pressed = 0;
	}
#endif
	if (dev->bridge == BRIDGE_OV519)
		reg_w(dev, OV519_R57_SNAPSHOT, 0x23);
}
/* Report snapshot button transitions to the input layer and schedule a
 * hardware snapshot-state reset when the button is (still) pressed. */
static void ov51x_handle_button(struct gspca_dev *gspca_dev, u8 state)
{
	struct sd *sd = (struct sd *) gspca_dev;
	if (sd->snapshot_pressed == state) {
		/* No transition. On the ov511 / ov519 we still need to
		   reset the button state repeatedly, as resetting does
		   not work as long as the button stays pressed */
		if (state) {
			switch (sd->bridge) {
			case BRIDGE_OV511:
			case BRIDGE_OV511PLUS:
			case BRIDGE_OV519:
				sd->snapshot_needs_reset = 1;
				break;
			}
		}
		return;
	}
#if IS_ENABLED(CONFIG_INPUT)
	input_report_key(gspca_dev->input_dev, KEY_CAMERA, state);
	input_sync(gspca_dev->input_dev);
#endif
	if (state)
		sd->snapshot_needs_reset = 1;
	sd->snapshot_pressed = state;
}
/* Parse one OV511 isoc packet: detect SOF/EOF markers, validate the
 * reported frame size on EOF, and forward payload to the frame buffer. */
static void ov511_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *in,			/* isoc packet */
			int len)		/* iso packet length */
{
	struct sd *sd = (struct sd *) gspca_dev;
	/* SOF/EOF packets have 1st to 8th bytes zeroed and the 9th
	 * byte non-zero. The EOF packet has image width/height in the
	 * 10th and 11th bytes. The 9th byte is given as follows:
	 *
	 * bit 7: EOF
	 *     6: compression enabled
	 *     5: 422/420/400 modes
	 *     4: 422/420/400 modes
	 *     3: 1
	 *     2: snapshot button on
	 *     1: snapshot frame
	 *     0: even/odd field
	 */
	if (!(in[0] | in[1] | in[2] | in[3] | in[4] | in[5] | in[6] | in[7]) &&
	    (in[8] & 0x08)) {
		ov51x_handle_button(gspca_dev, (in[8] >> 2) & 1);
		if (in[8] & 0x80) {
			/* Frame end */
			/* width/height are encoded in units of 8 pixels */
			if ((in[9] + 1) * 8 != gspca_dev->pixfmt.width ||
			    (in[10] + 1) * 8 != gspca_dev->pixfmt.height) {
				gspca_err(gspca_dev, "Invalid frame size, got: %dx%d, requested: %dx%d\n",
					  (in[9] + 1) * 8, (in[10] + 1) * 8,
					  gspca_dev->pixfmt.width,
					  gspca_dev->pixfmt.height);
				gspca_dev->last_packet_type = DISCARD_PACKET;
				return;
			}
			/* Add 11 byte footer to frame, might be useful */
			gspca_frame_add(gspca_dev, LAST_PACKET, in, 11);
			return;
		} else {
			/* Frame start */
			gspca_frame_add(gspca_dev, FIRST_PACKET, in, 0);
			sd->packet_nr = 0;
		}
	}
	/* Ignore the packet number */
	len--;
	/* intermediate packet */
	gspca_frame_add(gspca_dev, INTER_PACKET, in, len);
}
/* Parse one OV518 isoc packet: heuristic SOF/EOF detection, optional
 * trailing packet-number validation, then forward the payload. */
static void ov518_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,		/* isoc packet */
			int len)		/* iso packet length */
{
	struct sd *sd = (struct sd *) gspca_dev;
	/* A false positive here is likely, until OVT gives me
	 * the definitive SOF/EOF format */
	if ((!(data[0] | data[1] | data[2] | data[3] | data[5])) && data[6]) {
		ov51x_handle_button(gspca_dev, (data[6] >> 1) & 1);
		gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
		gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
		sd->packet_nr = 0;
	}
	if (gspca_dev->last_packet_type == DISCARD_PACKET)
		return;
	/* Does this device use packet numbers ? */
	/* packets with a length that is not a multiple of 8 carry a
	 * sequence number in their last byte */
	if (len & 7) {
		len--;
		if (sd->packet_nr == data[len])
			sd->packet_nr++;
		/* The last few packets of the frame (which are all 0's
		   except that they may contain part of the footer), are
		   numbered 0 */
		else if (sd->packet_nr == 0 || data[len]) {
			gspca_err(gspca_dev, "Invalid packet nr: %d (expect: %d)\n",
				  (int)data[len], (int)sd->packet_nr);
			gspca_dev->last_packet_type = DISCARD_PACKET;
			return;
		}
	}
	/* intermediate packet */
	gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
/* Scan one isochronous packet from an OV519 bridge.  Frames carry an
 * explicit 16-byte header whose 4th byte marks start (0x50) or end
 * (0x51) of frame.
 */
static void ov519_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,		/* isoc packet */
			int len)		/* iso packet length */
{
	/* Header of ov519 is 16 bytes:
	 *     Byte     Value      Description
	 *	0	0xff	magic
	 *	1	0xff	magic
	 *	2	0xff	magic
	 *	3	0xXX	0x50 = SOF, 0x51 = EOF
	 *	9	0xXX	0x01 initial frame without data,
	 *			0x00 standard frame with image
	 *	14	Lo	in EOF: length of image data / 8
	 *	15	Hi
	 */
	if (data[0] == 0xff && data[1] == 0xff && data[2] == 0xff) {
		switch (data[3]) {
		case 0x50:		/* start of frame */
			/* Don't check the button state here, as the state
			   usually (always ?) changes at EOF and checking it
			   here leads to unnecessary snapshot state resets. */
#define HDRSZ 16
			/* NOTE(review): len is not verified to be >= HDRSZ
			 * before this adjustment -- presumably guaranteed by
			 * the bridge's packet layout; confirm. */
			data += HDRSZ;
			len -= HDRSZ;
#undef HDRSZ
			/* Accept only packets starting with a JPEG SOI
			 * marker byte (0xff / 0xd8 check). */
			if (data[0] == 0xff || data[1] == 0xd8)
				gspca_frame_add(gspca_dev, FIRST_PACKET,
						data, len);
			else
				gspca_dev->last_packet_type = DISCARD_PACKET;
			return;
		case 0x51:		/* end of frame */
			/* Bit 0 of byte 11 carries the snapshot button. */
			ov51x_handle_button(gspca_dev, data[11] & 1);
			/* Byte 9 != 0 means an initial frame without data. */
			if (data[9] != 0)
				gspca_dev->last_packet_type = DISCARD_PACKET;
			gspca_frame_add(gspca_dev, LAST_PACKET,
					NULL, 0);
			return;
		}
	}

	/* intermediate packet */
	gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
}
/* Scan one bulk packet from an OVFX2 bridge.  This bridge has no frame
 * markers: a short read (len < bulk_size) signals end of frame.
 */
static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,		/* isoc packet */
			int len)		/* iso packet length */
{
	struct sd *sd = (struct sd *) gspca_dev;

	gspca_frame_add(gspca_dev, INTER_PACKET, data, len);

	/* A short read signals EOF */
	if (len < gspca_dev->cam.bulk_size) {
		/* If the frame is short, and it is one of the first ones
		   the sensor and bridge are still syncing, so drop it. */
		if (sd->first_frame) {
			sd->first_frame--;
			/* Discard frames with less than one byte per pixel. */
			if (gspca_dev->image_len <
				  sd->gspca_dev.pixfmt.width *
					sd->gspca_dev.pixfmt.height)
				gspca_dev->last_packet_type = DISCARD_PACKET;
		}
		/* Terminate the current frame and start the next one. */
		gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
		gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0);
	}
}
/* Dispatch an incoming USB packet to the scanner matching the bridge
 * chip this camera uses.
 */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,		/* isoc packet */
			int len)		/* iso packet length */
{
	struct sd *sd = (struct sd *) gspca_dev;
	int bridge = sd->bridge;

	if (bridge == BRIDGE_OV511 || bridge == BRIDGE_OV511PLUS)
		ov511_pkt_scan(gspca_dev, data, len);
	else if (bridge == BRIDGE_OV518 || bridge == BRIDGE_OV518PLUS)
		ov518_pkt_scan(gspca_dev, data, len);
	else if (bridge == BRIDGE_OV519)
		ov519_pkt_scan(gspca_dev, data, len);
	else if (bridge == BRIDGE_OVFX2)
		ovfx2_pkt_scan(gspca_dev, data, len);
	else if (bridge == BRIDGE_W9968CF)
		w9968cf_pkt_scan(gspca_dev, data, len);
}
/* -- management routines -- */
/* Program sensor brightness.
 *
 * @val: brightness control value; for the OV7660 it indexes brit_7660[]
 *       (0..6, the control range set up in sd_init_controls), for the
 *       other sensors it is written directly to the brightness register.
 */
static void setbrightness(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	/* One register set per brightness step for the OV7660. */
	static const struct ov_i2c_regvals brit_7660[][7] = {
		{{0x0f, 0x6a}, {0x24, 0x40}, {0x25, 0x2b}, {0x26, 0x90},
			{0x27, 0xe0}, {0x28, 0xe0}, {0x2c, 0xe0}},
		{{0x0f, 0x6a}, {0x24, 0x50}, {0x25, 0x40}, {0x26, 0xa1},
			{0x27, 0xc0}, {0x28, 0xc0}, {0x2c, 0xc0}},
		{{0x0f, 0x6a}, {0x24, 0x68}, {0x25, 0x58}, {0x26, 0xc2},
			{0x27, 0xa0}, {0x28, 0xa0}, {0x2c, 0xa0}},
		{{0x0f, 0x6a}, {0x24, 0x70}, {0x25, 0x68}, {0x26, 0xd3},
			{0x27, 0x80}, {0x28, 0x80}, {0x2c, 0x80}},
		{{0x0f, 0x6a}, {0x24, 0x80}, {0x25, 0x70}, {0x26, 0xd3},
			{0x27, 0x20}, {0x28, 0x20}, {0x2c, 0x20}},
		{{0x0f, 0x6a}, {0x24, 0x88}, {0x25, 0x78}, {0x26, 0xd3},
			{0x27, 0x40}, {0x28, 0x40}, {0x2c, 0x40}},
		{{0x0f, 0x6a}, {0x24, 0x90}, {0x25, 0x80}, {0x26, 0xd4},
			{0x27, 0x60}, {0x28, 0x60}, {0x2c, 0x60}}
	};

	switch (sd->sensor) {
	case SEN_OV8610:
	case SEN_OV7610:
	case SEN_OV76BE:
	case SEN_OV6620:
	case SEN_OV6630:
	case SEN_OV66308AF:
	case SEN_OV7640:
	case SEN_OV7648:
		i2c_w(sd, OV7610_REG_BRT, val);
		break;
	case SEN_OV7620:
	case SEN_OV7620AE:
		i2c_w(sd, OV7610_REG_BRT, val);
		break;
	case SEN_OV7660:
		write_i2c_regvals(sd, brit_7660[val],
				ARRAY_SIZE(brit_7660[0]));
		break;
	case SEN_OV7670:
		/*win trace
		 * i2c_w_mask(sd, OV7670_R13_COM8, 0, OV7670_COM8_AEC); */
		i2c_w(sd, OV7670_R55_BRIGHT, ov7670_abs_to_sm(val));
		break;
	}
}
/* Program sensor contrast.
 *
 * @val: contrast control value; meaning is sensor-specific -- a direct
 *       register write, an index into a gamma-curve lookup table
 *       (shifted by 4 or 5 bits), or, for the OV7660, an index (0..6)
 *       into the contrast_7660[] register-set table.
 */
static void setcontrast(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	/* One gamma-curve register set per contrast step for the OV7660. */
	static const struct ov_i2c_regvals contrast_7660[][31] = {
		{{0x6c, 0xf0}, {0x6d, 0xf0}, {0x6e, 0xf8}, {0x6f, 0xa0},
		 {0x70, 0x58}, {0x71, 0x38}, {0x72, 0x30}, {0x73, 0x30},
		 {0x74, 0x28}, {0x75, 0x28}, {0x76, 0x24}, {0x77, 0x24},
		 {0x78, 0x22}, {0x79, 0x28}, {0x7a, 0x2a}, {0x7b, 0x34},
		 {0x7c, 0x0f}, {0x7d, 0x1e}, {0x7e, 0x3d}, {0x7f, 0x65},
		 {0x80, 0x70}, {0x81, 0x77}, {0x82, 0x7d}, {0x83, 0x83},
		 {0x84, 0x88}, {0x85, 0x8d}, {0x86, 0x96}, {0x87, 0x9f},
		 {0x88, 0xb0}, {0x89, 0xc4}, {0x8a, 0xd9}},
		{{0x6c, 0xf0}, {0x6d, 0xf0}, {0x6e, 0xf8}, {0x6f, 0x94},
		 {0x70, 0x58}, {0x71, 0x40}, {0x72, 0x30}, {0x73, 0x30},
		 {0x74, 0x30}, {0x75, 0x30}, {0x76, 0x2c}, {0x77, 0x24},
		 {0x78, 0x22}, {0x79, 0x28}, {0x7a, 0x2a}, {0x7b, 0x31},
		 {0x7c, 0x0f}, {0x7d, 0x1e}, {0x7e, 0x3d}, {0x7f, 0x62},
		 {0x80, 0x6d}, {0x81, 0x75}, {0x82, 0x7b}, {0x83, 0x81},
		 {0x84, 0x87}, {0x85, 0x8d}, {0x86, 0x98}, {0x87, 0xa1},
		 {0x88, 0xb2}, {0x89, 0xc6}, {0x8a, 0xdb}},
		{{0x6c, 0xf0}, {0x6d, 0xf0}, {0x6e, 0xf0}, {0x6f, 0x84},
		 {0x70, 0x58}, {0x71, 0x48}, {0x72, 0x40}, {0x73, 0x40},
		 {0x74, 0x28}, {0x75, 0x28}, {0x76, 0x28}, {0x77, 0x24},
		 {0x78, 0x26}, {0x79, 0x28}, {0x7a, 0x28}, {0x7b, 0x34},
		 {0x7c, 0x0f}, {0x7d, 0x1e}, {0x7e, 0x3c}, {0x7f, 0x5d},
		 {0x80, 0x68}, {0x81, 0x71}, {0x82, 0x79}, {0x83, 0x81},
		 {0x84, 0x86}, {0x85, 0x8b}, {0x86, 0x95}, {0x87, 0x9e},
		 {0x88, 0xb1}, {0x89, 0xc5}, {0x8a, 0xd9}},
		{{0x6c, 0xf0}, {0x6d, 0xf0}, {0x6e, 0xf0}, {0x6f, 0x70},
		 {0x70, 0x58}, {0x71, 0x58}, {0x72, 0x48}, {0x73, 0x48},
		 {0x74, 0x38}, {0x75, 0x40}, {0x76, 0x34}, {0x77, 0x34},
		 {0x78, 0x2e}, {0x79, 0x28}, {0x7a, 0x24}, {0x7b, 0x22},
		 {0x7c, 0x0f}, {0x7d, 0x1e}, {0x7e, 0x3c}, {0x7f, 0x58},
		 {0x80, 0x63}, {0x81, 0x6e}, {0x82, 0x77}, {0x83, 0x80},
		 {0x84, 0x87}, {0x85, 0x8f}, {0x86, 0x9c}, {0x87, 0xa9},
		 {0x88, 0xc0}, {0x89, 0xd4}, {0x8a, 0xe6}},
		{{0x6c, 0xa0}, {0x6d, 0xf0}, {0x6e, 0x90}, {0x6f, 0x80},
		 {0x70, 0x70}, {0x71, 0x80}, {0x72, 0x60}, {0x73, 0x60},
		 {0x74, 0x58}, {0x75, 0x60}, {0x76, 0x4c}, {0x77, 0x38},
		 {0x78, 0x38}, {0x79, 0x2a}, {0x7a, 0x20}, {0x7b, 0x0e},
		 {0x7c, 0x0a}, {0x7d, 0x14}, {0x7e, 0x26}, {0x7f, 0x46},
		 {0x80, 0x54}, {0x81, 0x64}, {0x82, 0x70}, {0x83, 0x7c},
		 {0x84, 0x87}, {0x85, 0x93}, {0x86, 0xa6}, {0x87, 0xb4},
		 {0x88, 0xd0}, {0x89, 0xe5}, {0x8a, 0xf5}},
		{{0x6c, 0x60}, {0x6d, 0x80}, {0x6e, 0x60}, {0x6f, 0x80},
		 {0x70, 0x80}, {0x71, 0x80}, {0x72, 0x88}, {0x73, 0x30},
		 {0x74, 0x70}, {0x75, 0x68}, {0x76, 0x64}, {0x77, 0x50},
		 {0x78, 0x3c}, {0x79, 0x22}, {0x7a, 0x10}, {0x7b, 0x08},
		 {0x7c, 0x06}, {0x7d, 0x0e}, {0x7e, 0x1a}, {0x7f, 0x3a},
		 {0x80, 0x4a}, {0x81, 0x5a}, {0x82, 0x6b}, {0x83, 0x7b},
		 {0x84, 0x89}, {0x85, 0x96}, {0x86, 0xaf}, {0x87, 0xc3},
		 {0x88, 0xe1}, {0x89, 0xf2}, {0x8a, 0xfa}},
		{{0x6c, 0x20}, {0x6d, 0x40}, {0x6e, 0x20}, {0x6f, 0x60},
		 {0x70, 0x88}, {0x71, 0xc8}, {0x72, 0xc0}, {0x73, 0xb8},
		 {0x74, 0xa8}, {0x75, 0xb8}, {0x76, 0x80}, {0x77, 0x5c},
		 {0x78, 0x26}, {0x79, 0x10}, {0x7a, 0x08}, {0x7b, 0x04},
		 {0x7c, 0x02}, {0x7d, 0x06}, {0x7e, 0x0a}, {0x7f, 0x22},
		 {0x80, 0x33}, {0x81, 0x4c}, {0x82, 0x64}, {0x83, 0x7b},
		 {0x84, 0x90}, {0x85, 0xa7}, {0x86, 0xc7}, {0x87, 0xde},
		 {0x88, 0xf1}, {0x89, 0xf9}, {0x8a, 0xfd}},
	};

	switch (sd->sensor) {
	case SEN_OV7610:
	case SEN_OV6620:
		i2c_w(sd, OV7610_REG_CNT, val);
		break;
	case SEN_OV6630:
	case SEN_OV66308AF:
		/* Only the low nibble of the contrast register is used. */
		i2c_w_mask(sd, OV7610_REG_CNT, val >> 4, 0x0f);
		break;
	case SEN_OV8610: {
		/* 8 gamma steps, indexed by the top 3 bits of val. */
		static const u8 ctab[] = {
			0x03, 0x09, 0x0b, 0x0f, 0x53, 0x6f, 0x35, 0x7f
		};

		/* Use Y gamma control instead. Bit 0 enables it. */
		i2c_w(sd, 0x64, ctab[val >> 5]);
		break;
	}
	case SEN_OV7620:
	case SEN_OV7620AE: {
		/* 16 gamma steps, indexed by the top 4 bits of val. */
		static const u8 ctab[] = {
			0x01, 0x05, 0x09, 0x11, 0x15, 0x35, 0x37, 0x57,
			0x5b, 0xa5, 0xa7, 0xc7, 0xc9, 0xcf, 0xef, 0xff
		};

		/* Use Y gamma control instead. Bit 0 enables it. */
		i2c_w(sd, 0x64, ctab[val >> 4]);
		break;
	}
	case SEN_OV7660:
		write_i2c_regvals(sd, contrast_7660[val],
					ARRAY_SIZE(contrast_7660[0]));
		break;
	case SEN_OV7670:
		/* check that this isn't just the same as ov7610 */
		i2c_w(sd, OV7670_R56_CONTRAS, val >> 1);
		break;
	}
}
/* Write the exposure control value straight into sensor register 0x10. */
static void setexposure(struct gspca_dev *gspca_dev, s32 val)
{
	i2c_w((struct sd *)gspca_dev, 0x10, val);
}
/* Program sensor color saturation.
 *
 * @val: saturation control value; for the OV7660 it indexes
 *       colors_7660[] (0..4, the control range set up in
 *       sd_init_controls), for the other sensors it is written to the
 *       saturation register (masked to the high nibble on OV764x).
 */
static void setcolors(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	/* One color-matrix register set per saturation step (OV7660). */
	static const struct ov_i2c_regvals colors_7660[][6] = {
		{{0x4f, 0x28}, {0x50, 0x2a}, {0x51, 0x02}, {0x52, 0x0a},
		 {0x53, 0x19}, {0x54, 0x23}},
		{{0x4f, 0x47}, {0x50, 0x4a}, {0x51, 0x03}, {0x52, 0x11},
		 {0x53, 0x2c}, {0x54, 0x3e}},
		{{0x4f, 0x66}, {0x50, 0x6b}, {0x51, 0x05}, {0x52, 0x19},
		 {0x53, 0x40}, {0x54, 0x59}},
		{{0x4f, 0x84}, {0x50, 0x8b}, {0x51, 0x06}, {0x52, 0x20},
		 {0x53, 0x53}, {0x54, 0x73}},
		{{0x4f, 0xa3}, {0x50, 0xab}, {0x51, 0x08}, {0x52, 0x28},
		 {0x53, 0x66}, {0x54, 0x8e}},
	};

	switch (sd->sensor) {
	case SEN_OV8610:
	case SEN_OV7610:
	case SEN_OV76BE:
	case SEN_OV6620:
	case SEN_OV6630:
	case SEN_OV66308AF:
		i2c_w(sd, OV7610_REG_SAT, val);
		break;
	case SEN_OV7620:
	case SEN_OV7620AE:
		/* Use UV gamma control instead. Bits 0 & 7 are reserved. */
		/* rc = ov_i2c_write(sd->dev, 0x62, (val >> 9) & 0x7e);
		if (rc < 0)
			goto out; */
		i2c_w(sd, OV7610_REG_SAT, val);
		break;
	case SEN_OV7640:
	case SEN_OV7648:
		i2c_w(sd, OV7610_REG_SAT, val & 0xf0);
		break;
	case SEN_OV7660:
		write_i2c_regvals(sd, colors_7660[val],
					ARRAY_SIZE(colors_7660[0]));
		break;
	case SEN_OV7670:
		/* supported later once I work out how to do it
		 * transparently fail now! */
		/* set REG_COM13 values for UV sat auto mode */
		break;
	}
}
/* Enable or disable automatic brightness via bit 4 of register 0x2d. */
static void setautobright(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 bits = 0x00;

	if (val)
		bits = 0x10;
	i2c_w_mask(sd, 0x2d, bits, 0x10);
}
/* Program the anti-banding (light-frequency) filter.
 *
 * @val: 0 = disabled, 1 = 50 Hz, 2 = 60 Hz, 3 = auto (OV7670 only);
 *       matches the V4L2_CID_POWER_LINE_FREQUENCY menu set up in
 *       sd_init_controls.
 *
 * The OV7660/OV7670 use the COM8/COM11 registers; older sensors use
 * registers 0x2a/0x2b/0x2d and may need a frame-rate adjustment so an
 * integral number of banding periods fits in each frame.
 */
static void setfreq_i(struct sd *sd, s32 val)
{
	if (sd->sensor == SEN_OV7660
	 || sd->sensor == SEN_OV7670) {
		switch (val) {
		case 0: /* Banding filter disabled */
			i2c_w_mask(sd, OV7670_R13_COM8, 0, OV7670_COM8_BFILT);
			break;
		case 1: /* 50 hz */
			i2c_w_mask(sd, OV7670_R13_COM8, OV7670_COM8_BFILT,
				OV7670_COM8_BFILT);
			i2c_w_mask(sd, OV7670_R3B_COM11, 0x08, 0x18);
			break;
		case 2: /* 60 hz */
			i2c_w_mask(sd, OV7670_R13_COM8, OV7670_COM8_BFILT,
				OV7670_COM8_BFILT);
			i2c_w_mask(sd, OV7670_R3B_COM11, 0x00, 0x18);
			break;
		case 3: /* Auto hz - ov7670 only */
			i2c_w_mask(sd, OV7670_R13_COM8, OV7670_COM8_BFILT,
				OV7670_COM8_BFILT);
			i2c_w_mask(sd, OV7670_R3B_COM11, OV7670_COM11_HZAUTO,
				0x18);
			break;
		}
	} else {
		switch (val) {
		case 0: /* Banding filter disabled */
			i2c_w_mask(sd, 0x2d, 0x00, 0x04);
			i2c_w_mask(sd, 0x2a, 0x00, 0x80);
			break;
		case 1: /* 50 hz (filter on and framerate adj) */
			i2c_w_mask(sd, 0x2d, 0x04, 0x04);
			i2c_w_mask(sd, 0x2a, 0x80, 0x80);
			/* 20 fps -> 16.667 fps */
			if (sd->sensor == SEN_OV6620 ||
			    sd->sensor == SEN_OV6630 ||
			    sd->sensor == SEN_OV66308AF)
				i2c_w(sd, 0x2b, 0x5e);
			else
				i2c_w(sd, 0x2b, 0xac);
			break;
		case 2: /* 60 hz (filter on, ...) */
			i2c_w_mask(sd, 0x2d, 0x04, 0x04);
			if (sd->sensor == SEN_OV6620 ||
			    sd->sensor == SEN_OV6630 ||
			    sd->sensor == SEN_OV66308AF) {
				/* 20 fps -> 15 fps */
				i2c_w_mask(sd, 0x2a, 0x80, 0x80);
				i2c_w(sd, 0x2b, 0xa8);
			} else {
				/* no framerate adj. */
				i2c_w_mask(sd, 0x2a, 0x00, 0x80);
			}
			break;
		}
	}
}
/* Apply the light-frequency filter setting, then refresh the crop
 * window on W9968CF-based cameras (ugly but necessary).
 */
static void setfreq(struct gspca_dev *gspca_dev, s32 val)
{
	struct sd *dev = (struct sd *) gspca_dev;

	setfreq_i(dev, val);
	if (dev->bridge == BRIDGE_W9968CF)
		w9968cf_set_crop_window(dev);
}
/* VIDIOC_G_JPEGCOMP handler: report the current JPEG quality and the
 * markers we emit.  Only meaningful on the W9968CF bridge.
 */
static int sd_get_jcomp(struct gspca_dev *gspca_dev,
			struct v4l2_jpegcompression *jcomp)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->bridge != BRIDGE_W9968CF)
		return -ENOTTY;

	memset(jcomp, 0, sizeof(*jcomp));
	jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT |
			      V4L2_JPEG_MARKER_DRI;
	jcomp->quality = v4l2_ctrl_g_ctrl(sd->jpegqual);
	return 0;
}
/* VIDIOC_S_JPEGCOMP handler: route the requested quality through the
 * jpegqual control so the control framework stays in sync.  Only
 * meaningful on the W9968CF bridge.
 */
static int sd_set_jcomp(struct gspca_dev *gspca_dev,
			const struct v4l2_jpegcompression *jcomp)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (sd->bridge != BRIDGE_W9968CF)
		return -ENOTTY;

	v4l2_ctrl_s_ctrl(sd->jpegqual, jcomp->quality);
	return 0;
}
/* Volatile-control read-out: when the autogain cluster is queried,
 * refresh the exposure value from sensor register 0x10.
 */
static int sd_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
{
	struct gspca_dev *gspca_dev =
		container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
	struct sd *sd = (struct sd *)gspca_dev;

	gspca_dev->usb_err = 0;

	if (ctrl->id == V4L2_CID_AUTOGAIN)
		gspca_dev->exposure->val = i2c_r(sd, 0x10);
	return 0;
}
/* V4L2 s_ctrl handler: push a control change down to the hardware.
 * Settings are only written while streaming; otherwise they are picked
 * up when the stream starts.  Returns the saved USB error, if any.
 */
static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct gspca_dev *gspca_dev =
		container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
	struct sd *sd = (struct sd *)gspca_dev;

	gspca_dev->usb_err = 0;

	if (!gspca_dev->streaming)
		return 0;

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		setbrightness(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_CONTRAST:
		setcontrast(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_POWER_LINE_FREQUENCY:
		setfreq(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_AUTOBRIGHTNESS:
		if (ctrl->is_new)
			setautobright(gspca_dev, ctrl->val);
		/* When autobrightness is turned off, apply the manual
		 * brightness from its clustered control. */
		if (!ctrl->val && sd->brightness->is_new)
			setbrightness(gspca_dev, sd->brightness->val);
		break;
	case V4L2_CID_SATURATION:
		setcolors(gspca_dev, ctrl->val);
		break;
	case V4L2_CID_HFLIP:
		/* hflip/vflip are clustered; apply both at once. */
		sethvflip(gspca_dev, ctrl->val, sd->vflip->val);
		break;
	case V4L2_CID_AUTOGAIN:
		if (ctrl->is_new)
			setautogain(gspca_dev, ctrl->val);
		/* With autogain off, apply the manual exposure from the
		 * clustered exposure control. */
		if (!ctrl->val && gspca_dev->exposure->is_new)
			setexposure(gspca_dev, gspca_dev->exposure->val);
		break;
	case V4L2_CID_JPEG_COMPRESSION_QUALITY:
		return -EBUSY; /* Should never happen, as we grab the ctrl */
	}
	return gspca_dev->usb_err;
}
/* V4L2 control operations shared by all controls of this driver. */
static const struct v4l2_ctrl_ops sd_ctrl_ops = {
	.g_volatile_ctrl = sd_g_volatile_ctrl,
	.s_ctrl = sd_s_ctrl,
};
/* Create the V4L2 controls supported by the detected sensor/bridge
 * combination (per the valid_controls[] capability table) and cluster
 * the auto/manual pairs.  Returns 0 on success or a negative errno from
 * the control handler.
 */
static int sd_init_controls(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *)gspca_dev;
	struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;

	gspca_dev->vdev.ctrl_handler = hdl;
	v4l2_ctrl_handler_init(hdl, 10);
	/* The OV7660 uses table-driven settings with smaller ranges
	 * (see setbrightness/setcontrast/setcolors). */
	if (valid_controls[sd->sensor].has_brightness)
		sd->brightness = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_BRIGHTNESS, 0,
			sd->sensor == SEN_OV7660 ? 6 : 255, 1,
			sd->sensor == SEN_OV7660 ? 3 : 127);
	if (valid_controls[sd->sensor].has_contrast) {
		if (sd->sensor == SEN_OV7660)
			v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
				V4L2_CID_CONTRAST, 0, 6, 1, 3);
		else
			v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
				V4L2_CID_CONTRAST, 0, 255, 1,
				(sd->sensor == SEN_OV6630 ||
				 sd->sensor == SEN_OV66308AF) ? 200 : 127);
	}
	if (valid_controls[sd->sensor].has_sat)
		v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_SATURATION, 0,
			sd->sensor == SEN_OV7660 ? 4 : 255, 1,
			sd->sensor == SEN_OV7660 ? 2 : 127);
	if (valid_controls[sd->sensor].has_exposure)
		gspca_dev->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_EXPOSURE, 0, 255, 1, 127);
	if (valid_controls[sd->sensor].has_hvflip) {
		sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_HFLIP, 0, 1, 1, 0);
		sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_VFLIP, 0, 1, 1, 0);
	}
	if (valid_controls[sd->sensor].has_autobright)
		sd->autobright = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_AUTOBRIGHTNESS, 0, 1, 1, 1);
	if (valid_controls[sd->sensor].has_autogain)
		gspca_dev->autogain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	if (valid_controls[sd->sensor].has_freq) {
		/* Only the OV7670 supports the "auto" menu entry
		 * (see setfreq_i). */
		if (sd->sensor == SEN_OV7670)
			sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
				V4L2_CID_POWER_LINE_FREQUENCY,
				V4L2_CID_POWER_LINE_FREQUENCY_AUTO, 0,
				V4L2_CID_POWER_LINE_FREQUENCY_AUTO);
		else
			sd->freq = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
				V4L2_CID_POWER_LINE_FREQUENCY,
				V4L2_CID_POWER_LINE_FREQUENCY_60HZ, 0, 0);
	}
	/* JPEG quality only exists on the W9968CF bridge. */
	if (sd->bridge == BRIDGE_W9968CF)
		sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
			V4L2_CID_JPEG_COMPRESSION_QUALITY,
			QUALITY_MIN, QUALITY_MAX, 1, QUALITY_DEF);

	if (hdl->error) {
		gspca_err(gspca_dev, "Could not initialize controls\n");
		return hdl->error;
	}
	/* Tie auto controls to their manual counterparts. */
	if (gspca_dev->autogain)
		v4l2_ctrl_auto_cluster(3, &gspca_dev->autogain, 0, true);
	if (sd->autobright)
		v4l2_ctrl_auto_cluster(2, &sd->autobright, 0, false);
	if (sd->hflip)
		v4l2_ctrl_cluster(2, &sd->hflip);
	return 0;
}
/* sub-driver description: the callbacks the gspca core invokes for this
 * driver over the device's lifetime (probe config, init, streaming
 * start/stop, packet scanning, JPEG compression ioctls, ...). */
static const struct sd_desc sd_desc = {
	.name = MODULE_NAME,
	.config = sd_config,
	.init = sd_init,
	.init_controls = sd_init_controls,
	.isoc_init = sd_isoc_init,
	.start = sd_start,
	.stopN = sd_stopN,
	.stop0 = sd_stop0,
	.pkt_scan = sd_pkt_scan,
	.dq_callback = sd_reset_snapshot,
	.get_jcomp = sd_get_jcomp,
	.set_jcomp = sd_set_jcomp,
#if IS_ENABLED(CONFIG_INPUT)
	.other_input = 1,	/* snapshot button reported via input dev */
#endif
};
/* -- module initialisation -- */
/* USB vendor/product IDs handled by this driver; driver_info selects
 * the bridge chip (and optionally BRIDGE_INVERT_LED). */
static const struct usb_device_id device_table[] = {
	{USB_DEVICE(0x041e, 0x4003), .driver_info = BRIDGE_W9968CF },
	{USB_DEVICE(0x041e, 0x4052),
		.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
	{USB_DEVICE(0x041e, 0x405f), .driver_info = BRIDGE_OV519 },
	{USB_DEVICE(0x041e, 0x4060), .driver_info = BRIDGE_OV519 },
	{USB_DEVICE(0x041e, 0x4061), .driver_info = BRIDGE_OV519 },
	{USB_DEVICE(0x041e, 0x4064), .driver_info = BRIDGE_OV519 },
	{USB_DEVICE(0x041e, 0x4067), .driver_info = BRIDGE_OV519 },
	{USB_DEVICE(0x041e, 0x4068), .driver_info = BRIDGE_OV519 },
	{USB_DEVICE(0x045e, 0x028c),
		.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
	{USB_DEVICE(0x054c, 0x0154), .driver_info = BRIDGE_OV519 },
	{USB_DEVICE(0x054c, 0x0155), .driver_info = BRIDGE_OV519 },
	{USB_DEVICE(0x05a9, 0x0511), .driver_info = BRIDGE_OV511 },
	{USB_DEVICE(0x05a9, 0x0518), .driver_info = BRIDGE_OV518 },
	{USB_DEVICE(0x05a9, 0x0519),
		.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
	{USB_DEVICE(0x05a9, 0x0530),
		.driver_info = BRIDGE_OV519 | BRIDGE_INVERT_LED },
	{USB_DEVICE(0x05a9, 0x2800), .driver_info = BRIDGE_OVFX2 },
	{USB_DEVICE(0x05a9, 0x4519), .driver_info = BRIDGE_OV519 },
	{USB_DEVICE(0x05a9, 0x8519), .driver_info = BRIDGE_OV519 },
	{USB_DEVICE(0x05a9, 0xa511), .driver_info = BRIDGE_OV511PLUS },
	{USB_DEVICE(0x05a9, 0xa518), .driver_info = BRIDGE_OV518PLUS },
	{USB_DEVICE(0x0813, 0x0002), .driver_info = BRIDGE_OV511PLUS },
	{USB_DEVICE(0x0b62, 0x0059), .driver_info = BRIDGE_OVFX2 },
	{USB_DEVICE(0x0e96, 0xc001), .driver_info = BRIDGE_OVFX2 },
	{USB_DEVICE(0x1046, 0x9967), .driver_info = BRIDGE_W9968CF },
	{USB_DEVICE(0x8020, 0xef04), .driver_info = BRIDGE_OVFX2 },
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
/* USB probe: hand the interface to the gspca core together with our
 * sub-driver description and per-device state size. */
static int sd_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
				THIS_MODULE);
}
/* USB driver glue; disconnect and power management are handled by the
 * gspca core. */
static struct usb_driver sd_driver = {
	.name = MODULE_NAME,
	.id_table = device_table,
	.probe = sd_probe,
	.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
	.suspend = gspca_suspend,
	.resume = gspca_resume,
	.reset_resume = gspca_resume,
#endif
};
module_usb_driver(sd_driver);

/* frame_rate is defined earlier in this file; expose it as a writable
 * module parameter. */
module_param(frame_rate, int, 0644);
MODULE_PARM_DESC(frame_rate, "Frame rate (5, 10, 15, 20 or 30 fps)");
| ./CrossVul/dataset_final_sorted/CWE-476/c/bad_3958_0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.